/*	$NetBSD: if_wm.c,v 1.796 2024/01/29 06:05:11 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.796 2024/01/29 06:05:11 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
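
/*
 * Note for readers: the third DPRINTF() argument carries its own
 * parentheses so that it can expand to a full printf(9) argument list.
 * An illustrative call (names as used elsewhere in this driver):
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */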

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
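
/*
 * A worked example of the ring arithmetic above: because the descriptor
 * and job counts are powers of two, WM_NEXTTX() wraps by masking, e.g.
 * with WM_NTXDESC(txq) == 4096, WM_NEXTTX(txq, 4095) ==
 * (4095 + 1) & 4095 == 0.  WM_MAXTXDMA works out to
 * 2 * round_page(65535), i.e. 128 KiB on 4 KiB-page machines, sized to
 * cover a maximal TSO payload.
 */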

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
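/*
 * The arithmetic behind the comment above: 256 descriptors with 2 KiB
 * buffers is 512 KiB of Rx space, and a full-sized jumbo packet
 * spanning 5 buffers gives 256 / 5 ~= 51, hence "room for 50 jumbo
 * packets".
 */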
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
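
/*
 * WM_CDTXOFF()/WM_CDRXOFF() yield the byte offset of descriptor x
 * inside the control-data area, e.g. with 16-byte descriptors the
 * fourth descriptor (x == 3) starts at offset 3 * 16 == 48.
 */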

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
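
/*
 * swfwphysem[] is indexed by function ID to select the per-PHY
 * software/firmware semaphore bit, and wm_82580_rxpbs_table[] maps the
 * encoded RXPBS size field to an actual packet buffer size; see
 * wm_rxpbs_adjust_82580() below for the lookup.
 */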

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
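
/*
 * For reference, WM_Q_EVCNT_DEFINE(txq, txdw) expands to a name buffer
 * "txq_txdw_evcnt_name[]" plus "struct evcnt txq_ev_txdw", and
 * WM_Q_EVCNT_ATTACH() formats the counter name as, e.g., "txq00txdw"
 * before registering it with evcnt_attach_dynamic(9).
 */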

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
	WM_Q_EVCNT_DEFINE(rxq, qdrop);	/* Rx queue dropped packets */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	u_int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared by both the "Intr. Cause" and
	 * non-"Intr. Cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */
	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
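
/*
 * rxq_tailp always points at the pointer cell where the next fragment
 * should be linked: &rxq_head right after WM_RXCHAIN_RESET(), and
 * &m->m_next after WM_RXCHAIN_LINK(rxq, m).  Appending to the chain is
 * therefore O(1) with no list walk.
 */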

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)		\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	__nothing
#define	WM_EVCNT_STORE(ev, val)	__nothing
#define	WM_EVCNT_ADD(ev, val)	__nothing

#define WM_Q_EVCNT_INCR(qname, evname)		__nothing
#define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
#define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
#endif /* !WM_EVENT_COUNTERS */
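
/*
 * On __HAVE_ATOMIC64_LOADSTORE platforms the counter update is a
 * relaxed load/store pair rather than an atomic read-modify-write.
 * This assumes each counter effectively has a single writer at a time
 * (per-queue counters are updated from the owning queue's context);
 * what the atomics buy is that readers such as vmstat(1) -e never see
 * torn 64-bit values.
 */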

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
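
/*
 * CSR_WRITE_FLUSH() forces posted writes out to the device by reading
 * back an arbitrary register (STATUS).  A typical sequence looks like:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */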

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
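
/*
 * The _LO/_HI pairs split a descriptor-ring bus address into the two
 * 32-bit halves the hardware registers expect; on platforms with a
 * 32-bit bus_addr_t the high half is constant 0.  E.g. for address
 * 0x123456000, LO is 0x23456000 and HI is 0x1.
 */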

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1294 	  "Intel PRO/1000MT (82546GB)",
   1295 	  WM_T_82546_3,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1298 	  "Intel i82541EI 1000BASE-T Ethernet",
   1299 	  WM_T_82541,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1303 	  WM_T_82541,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1307 	  WM_T_82541,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1310 	  "Intel i82541ER 1000BASE-T Ethernet",
   1311 	  WM_T_82541_2,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1314 	  "Intel i82541GI 1000BASE-T Ethernet",
   1315 	  WM_T_82541_2,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1319 	  WM_T_82541_2,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1322 	  "Intel i82541PI 1000BASE-T Ethernet",
   1323 	  WM_T_82541_2,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1326 	  "Intel i82547EI 1000BASE-T Ethernet",
   1327 	  WM_T_82547,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1331 	  WM_T_82547,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1334 	  "Intel i82547GI 1000BASE-T Ethernet",
   1335 	  WM_T_82547_2,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1338 	  "Intel PRO/1000 PT (82571EB)",
   1339 	  WM_T_82571,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1342 	  "Intel PRO/1000 PF (82571EB)",
   1343 	  WM_T_82571,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1346 	  "Intel PRO/1000 PB (82571EB)",
   1347 	  WM_T_82571,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1350 	  "Intel PRO/1000 QT (82571EB)",
   1351 	  WM_T_82571,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1355 	  WM_T_82571,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1359 	  WM_T_82571,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1363 	  WM_T_82571,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82571,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1371 	  WM_T_82571,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1374 	  "Intel i82572EI 1000baseT Ethernet",
   1375 	  WM_T_82572,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1378 	  "Intel i82572EI 1000baseX Ethernet",
   1379 	  WM_T_82572,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1383 	  WM_T_82572,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1386 	  "Intel i82572EI 1000baseT Ethernet",
   1387 	  WM_T_82572,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1390 	  "Intel i82573E",
   1391 	  WM_T_82573,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1394 	  "Intel i82573E IAMT",
   1395 	  WM_T_82573,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1398 	  "Intel i82573L Gigabit Ethernet",
   1399 	  WM_T_82573,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1402 	  "Intel i82574L",
   1403 	  WM_T_82574,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1406 	  "Intel i82574L",
   1407 	  WM_T_82574,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1410 	  "Intel i82583V",
   1411 	  WM_T_82583,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1414 	  "i80003 dual 1000baseT Ethernet",
   1415 	  WM_T_80003,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1418 	  "i80003 dual 1000baseX Ethernet",
   1419 	  WM_T_80003,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1423 	  WM_T_80003,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1426 	  "Intel i80003 1000baseT Ethernet",
   1427 	  WM_T_80003,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_80003,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1434 	  "Intel i82801H (M_AMT) LAN Controller",
   1435 	  WM_T_ICH8,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1437 	  "Intel i82801H (AMT) LAN Controller",
   1438 	  WM_T_ICH8,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1440 	  "Intel i82801H LAN Controller",
   1441 	  WM_T_ICH8,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1444 	  WM_T_ICH8,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1446 	  "Intel i82801H (M) LAN Controller",
   1447 	  WM_T_ICH8,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1450 	  WM_T_ICH8,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1453 	  WM_T_ICH8,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1455 	  "82567V-3 LAN Controller",
   1456 	  WM_T_ICH8,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1458 	  "82801I (AMT) LAN Controller",
   1459 	  WM_T_ICH9,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1461 	  "82801I 10/100 LAN Controller",
   1462 	  WM_T_ICH9,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1464 	  "82801I (G) 10/100 LAN Controller",
   1465 	  WM_T_ICH9,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1467 	  "82801I (GT) 10/100 LAN Controller",
   1468 	  WM_T_ICH9,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1470 	  "82801I (C) LAN Controller",
   1471 	  WM_T_ICH9,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1473 	  "82801I mobile LAN Controller",
   1474 	  WM_T_ICH9,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1476 	  "82801I mobile (V) LAN Controller",
   1477 	  WM_T_ICH9,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1479 	  "82801I mobile (AMT) LAN Controller",
   1480 	  WM_T_ICH9,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1482 	  "82567LM-4 LAN Controller",
   1483 	  WM_T_ICH9,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1485 	  "82567LM-2 LAN Controller",
   1486 	  WM_T_ICH10,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1488 	  "82567LF-2 LAN Controller",
   1489 	  WM_T_ICH10,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1491 	  "82567LM-3 LAN Controller",
   1492 	  WM_T_ICH10,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1494 	  "82567LF-3 LAN Controller",
   1495 	  WM_T_ICH10,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1497 	  "82567V-2 LAN Controller",
   1498 	  WM_T_ICH10,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1500 	  "82567V-3? LAN Controller",
   1501 	  WM_T_ICH10,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1503 	  "HANKSVILLE LAN Controller",
   1504 	  WM_T_ICH10,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1506 	  "PCH LAN (82577LM) Controller",
   1507 	  WM_T_PCH,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1509 	  "PCH LAN (82577LC) Controller",
   1510 	  WM_T_PCH,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1512 	  "PCH LAN (82578DM) Controller",
   1513 	  WM_T_PCH,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1515 	  "PCH LAN (82578DC) Controller",
   1516 	  WM_T_PCH,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1518 	  "PCH2 LAN (82579LM) Controller",
   1519 	  WM_T_PCH2,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1521 	  "PCH2 LAN (82579V) Controller",
   1522 	  WM_T_PCH2,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1524 	  "82575EB dual-1000baseT Ethernet",
   1525 	  WM_T_82575,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1528 	  WM_T_82575,		WMP_F_SERDES },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1530 	  "82575GB quad-1000baseT Ethernet",
   1531 	  WM_T_82575,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1533 	  "82575GB quad-1000baseT Ethernet (PM)",
   1534 	  WM_T_82575,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1536 	  "82576 1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1539 	  "82576 1000BaseX Ethernet",
   1540 	  WM_T_82576,		WMP_F_FIBER },
   1541 
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1543 	  "82576 gigabit Ethernet (SERDES)",
   1544 	  WM_T_82576,		WMP_F_SERDES },
   1545 
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1547 	  "82576 quad-1000BaseT Ethernet",
   1548 	  WM_T_82576,		WMP_F_COPPER },
   1549 
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1552 	  WM_T_82576,		WMP_F_COPPER },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1555 	  "82576 gigabit Ethernet",
   1556 	  WM_T_82576,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1559 	  "82576 gigabit Ethernet (SERDES)",
   1560 	  WM_T_82576,		WMP_F_SERDES },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1562 	  "82576 quad-gigabit Ethernet (SERDES)",
   1563 	  WM_T_82576,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1566 	  "82580 1000BaseT Ethernet",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1569 	  "82580 1000BaseX Ethernet",
   1570 	  WM_T_82580,		WMP_F_FIBER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1573 	  "82580 1000BaseT Ethernet (SERDES)",
   1574 	  WM_T_82580,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1577 	  "82580 gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1580 	  "82580 dual-1000BaseT Ethernet",
   1581 	  WM_T_82580,		WMP_F_COPPER },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1584 	  "82580 quad-1000BaseX Ethernet",
   1585 	  WM_T_82580,		WMP_F_FIBER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1589 	  WM_T_82580,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1593 	  WM_T_82580,		WMP_F_SERDES },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1596 	  "DH89XXCC 1000BASE-KX Ethernet",
   1597 	  WM_T_82580,		WMP_F_SERDES },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1601 	  WM_T_82580,		WMP_F_SERDES },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1604 	  "I350 Gigabit Network Connection",
   1605 	  WM_T_I350,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1608 	  "I350 Gigabit Fiber Network Connection",
   1609 	  WM_T_I350,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1612 	  "I350 Gigabit Backplane Connection",
   1613 	  WM_T_I350,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1616 	  "I350 Quad Port Gigabit Ethernet",
   1617 	  WM_T_I350,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1620 	  "I350 Gigabit Connection",
   1621 	  WM_T_I350,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1624 	  "I354 Gigabit Ethernet (KX)",
   1625 	  WM_T_I354,		WMP_F_SERDES },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1628 	  "I354 Gigabit Ethernet (SGMII)",
   1629 	  WM_T_I354,		WMP_F_COPPER },
   1630 
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1632 	  "I354 Gigabit Ethernet (2.5G)",
   1633 	  WM_T_I354,		WMP_F_COPPER },
   1634 
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1636 	  "I210-T1 Ethernet Server Adapter",
   1637 	  WM_T_I210,		WMP_F_COPPER },
   1638 
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1640 	  "I210 Ethernet (Copper OEM)",
   1641 	  WM_T_I210,		WMP_F_COPPER },
   1642 
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1644 	  "I210 Ethernet (Copper IT)",
   1645 	  WM_T_I210,		WMP_F_COPPER },
   1646 
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1648 	  "I210 Ethernet (Copper, FLASH less)",
   1649 	  WM_T_I210,		WMP_F_COPPER },
   1650 
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1652 	  "I210 Gigabit Ethernet (Fiber)",
   1653 	  WM_T_I210,		WMP_F_FIBER },
   1654 
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1656 	  "I210 Gigabit Ethernet (SERDES)",
   1657 	  WM_T_I210,		WMP_F_SERDES },
   1658 
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1661 	  WM_T_I210,		WMP_F_SERDES },
   1662 
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1664 	  "I210 Gigabit Ethernet (SGMII)",
   1665 	  WM_T_I210,		WMP_F_COPPER },
   1666 
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1669 	  WM_T_I210,		WMP_F_COPPER },
   1670 
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1672 	  "I211 Ethernet (COPPER)",
   1673 	  WM_T_I211,		WMP_F_COPPER },
   1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1675 	  "I217 V Ethernet Connection",
   1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1678 	  "I217 LM Ethernet Connection",
   1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1681 	  "I218 V Ethernet Connection",
   1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1684 	  "I218 V Ethernet Connection",
   1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1687 	  "I218 V Ethernet Connection",
   1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1690 	  "I218 LM Ethernet Connection",
   1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1693 	  "I218 LM Ethernet Connection",
   1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1696 	  "I218 LM Ethernet Connection",
   1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1699 	  "I219 LM Ethernet Connection",
   1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1702 	  "I219 LM (2) Ethernet Connection",
   1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1705 	  "I219 LM (3) Ethernet Connection",
   1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1708 	  "I219 LM (4) Ethernet Connection",
   1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1711 	  "I219 LM (5) Ethernet Connection",
   1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1714 	  "I219 LM (6) Ethernet Connection",
   1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1717 	  "I219 LM (7) Ethernet Connection",
   1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1720 	  "I219 LM (8) Ethernet Connection",
   1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1723 	  "I219 LM (9) Ethernet Connection",
   1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1726 	  "I219 LM (10) Ethernet Connection",
   1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1729 	  "I219 LM (11) Ethernet Connection",
   1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1732 	  "I219 LM (12) Ethernet Connection",
   1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1735 	  "I219 LM (13) Ethernet Connection",
   1736 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1738 	  "I219 LM (14) Ethernet Connection",
   1739 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1741 	  "I219 LM (15) Ethernet Connection",
   1742 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1744 	  "I219 LM (16) Ethernet Connection",
   1745 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1747 	  "I219 LM (17) Ethernet Connection",
   1748 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1750 	  "I219 LM (18) Ethernet Connection",
   1751 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1753 	  "I219 LM (19) Ethernet Connection",
   1754 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM20,
   1756 	  "I219 LM (20) Ethernet Connection",
   1757 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM21,
   1759 	  "I219 LM (21) Ethernet Connection",
   1760 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM22,
   1762 	  "I219 LM (22) Ethernet Connection",
   1763 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM23,
   1765 	  "I219 LM (23) Ethernet Connection",
   1766 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1768 	  "I219 V Ethernet Connection",
   1769 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1771 	  "I219 V (2) Ethernet Connection",
   1772 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1774 	  "I219 V (4) Ethernet Connection",
   1775 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1777 	  "I219 V (5) Ethernet Connection",
   1778 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1780 	  "I219 V (6) Ethernet Connection",
   1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1783 	  "I219 V (7) Ethernet Connection",
   1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1786 	  "I219 V (8) Ethernet Connection",
   1787 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1789 	  "I219 V (9) Ethernet Connection",
   1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1792 	  "I219 V (10) Ethernet Connection",
   1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1795 	  "I219 V (11) Ethernet Connection",
   1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1798 	  "I219 V (12) Ethernet Connection",
   1799 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1801 	  "I219 V (13) Ethernet Connection",
   1802 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1804 	  "I219 V (14) Ethernet Connection",
   1805 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1807 	  "I219 V (15) Ethernet Connection",
   1808 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1810 	  "I219 V (16) Ethernet Connection",
   1811 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1813 	  "I219 V (17) Ethernet Connection",
   1814 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1816 	  "I219 V (18) Ethernet Connection",
   1817 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1819 	  "I219 V (19) Ethernet Connection",
   1820 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V20,
   1822 	  "I219 V (20) Ethernet Connection",
   1823 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V21,
   1825 	  "I219 V (21) Ethernet Connection",
   1826 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V22,
   1828 	  "I219 V (22) Ethernet Connection",
   1829 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V23,
   1831 	  "I219 V (23) Ethernet Connection",
   1832 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1833 	{ 0,			0,
   1834 	  NULL,
   1835 	  0,			0 },
   1836 };
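         /*
          * The all-zero entry terminates the table; wm_lookup() scans until
          * it reaches the NULL wmp_name sentinel.
          */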
   1837 
   1838 /*
   1839  * Register read/write functions.
   1840  * Other than CSR_{READ|WRITE}().
   1841  */
   1842 
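         /*
          * Indirect I/O access (used only for workarounds): the I/O BAR is
          * a two-register window.  Writing a register offset to byte 0
          * (IOADDR) selects the target; the value is then read or written
          * at byte 4 (IODATA), as the helpers below do.
          */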
   1843 #if 0 /* Not currently used */
   1844 static inline uint32_t
   1845 wm_io_read(struct wm_softc *sc, int reg)
   1846 {
   1847 
   1848 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1849 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1850 }
   1851 #endif
   1852 
   1853 static inline void
   1854 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1855 {
   1856 
   1857 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1858 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1859 }
   1860 
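         /*
          * Helper for 8-bit controller registers reached through an
          * SCTL-style indirect register: the data byte and target offset
          * are packed into one register write, then the READY bit is
          * polled in 5us steps until the write is accepted.
          */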
   1861 static inline void
   1862 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1863     uint32_t data)
   1864 {
   1865 	uint32_t regval;
   1866 	int i;
   1867 
   1868 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1869 
   1870 	CSR_WRITE(sc, reg, regval);
   1871 
   1872 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1873 		delay(5);
   1874 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1875 			break;
   1876 	}
   1877 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1878 		aprint_error("%s: WARNING:"
   1879 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1880 		    device_xname(sc->sc_dev), reg);
   1881 	}
   1882 }
   1883 
   1884 static inline void
   1885 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1886 {
   1887 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1888 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1889 }
   1890 
   1891 /*
   1892  * Descriptor sync/init functions.
   1893  */
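         /*
          * Illustrative example of the wrap handling in wm_cdtxsync():
          * with WM_NTXDESC(txq) == 256, start == 250 and num == 10, the
          * first sync covers descriptors 250..255 and the second covers
          * descriptors 0..3.
          */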
   1894 static inline void
   1895 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1896 {
   1897 	struct wm_softc *sc = txq->txq_sc;
   1898 
   1899 	/* If it will wrap around, sync to the end of the ring. */
   1900 	if ((start + num) > WM_NTXDESC(txq)) {
   1901 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1902 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1903 		    (WM_NTXDESC(txq) - start), ops);
   1904 		num -= (WM_NTXDESC(txq) - start);
   1905 		start = 0;
   1906 	}
   1907 
   1908 	/* Now sync whatever is left. */
   1909 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1910 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1911 }
   1912 
   1913 static inline void
   1914 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1915 {
   1916 	struct wm_softc *sc = rxq->rxq_sc;
   1917 
   1918 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1919 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1920 }
   1921 
   1922 static inline void
   1923 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1924 {
   1925 	struct wm_softc *sc = rxq->rxq_sc;
   1926 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1927 	struct mbuf *m = rxs->rxs_mbuf;
   1928 
   1929 	/*
   1930 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1931 	 * so that the payload after the Ethernet header is aligned
   1932 	 * to a 4-byte boundary.
    1933 	 *
   1934 	 * XXX BRAINDAMAGE ALERT!
   1935 	 * The stupid chip uses the same size for every buffer, which
   1936 	 * is set in the Receive Control register.  We are using the 2K
   1937 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1938 	 * reason, we can't "scoot" packets longer than the standard
   1939 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1940 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1941 	 * the upper layer copy the headers.
   1942 	 */
   1943 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1944 
   1945 	if (sc->sc_type == WM_T_82574) {
   1946 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1947 		rxd->erx_data.erxd_addr =
   1948 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1949 		rxd->erx_data.erxd_dd = 0;
   1950 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1951 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1952 
   1953 		rxd->nqrx_data.nrxd_paddr =
   1954 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1955 		/* Currently, split header is not supported. */
   1956 		rxd->nqrx_data.nrxd_haddr = 0;
   1957 	} else {
   1958 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1959 
   1960 		wm_set_dma_addr(&rxd->wrx_addr,
   1961 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1962 		rxd->wrx_len = 0;
   1963 		rxd->wrx_cksum = 0;
   1964 		rxd->wrx_status = 0;
   1965 		rxd->wrx_errors = 0;
   1966 		rxd->wrx_special = 0;
   1967 	}
   1968 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1969 
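         	/* Notify the device of the new ring tail position. */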
   1970 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1971 }
   1972 
   1973 /*
   1974  * Device driver interface functions and commonly used functions.
   1975  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1976  */
   1977 
   1978 /* Lookup supported device table */
   1979 static const struct wm_product *
   1980 wm_lookup(const struct pci_attach_args *pa)
   1981 {
   1982 	const struct wm_product *wmp;
   1983 
   1984 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1985 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1986 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1987 			return wmp;
   1988 	}
   1989 	return NULL;
   1990 }
   1991 
   1992 /* The match function (ca_match) */
   1993 static int
   1994 wm_match(device_t parent, cfdata_t cf, void *aux)
   1995 {
   1996 	struct pci_attach_args *pa = aux;
   1997 
   1998 	if (wm_lookup(pa) != NULL)
   1999 		return 1;
   2000 
   2001 	return 0;
   2002 }
   2003 
   2004 /* The attach function (ca_attach) */
   2005 static void
   2006 wm_attach(device_t parent, device_t self, void *aux)
   2007 {
   2008 	struct wm_softc *sc = device_private(self);
   2009 	struct pci_attach_args *pa = aux;
   2010 	prop_dictionary_t dict;
   2011 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2012 	pci_chipset_tag_t pc = pa->pa_pc;
   2013 	int counts[PCI_INTR_TYPE_SIZE];
   2014 	pci_intr_type_t max_type;
   2015 	const char *eetype, *xname;
   2016 	bus_space_tag_t memt;
   2017 	bus_space_handle_t memh;
   2018 	bus_size_t memsize;
   2019 	int memh_valid;
   2020 	int i, error;
   2021 	const struct wm_product *wmp;
   2022 	prop_data_t ea;
   2023 	prop_number_t pn;
   2024 	uint8_t enaddr[ETHER_ADDR_LEN];
   2025 	char buf[256];
   2026 	char wqname[MAXCOMLEN];
   2027 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2028 	pcireg_t preg, memtype;
   2029 	uint16_t eeprom_data, apme_mask;
   2030 	bool force_clear_smbi;
   2031 	uint32_t link_mode;
   2032 	uint32_t reg;
   2033 
   2034 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2035 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2036 #endif
   2037 	sc->sc_dev = self;
   2038 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2039 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2040 	sc->sc_core_stopping = false;
   2041 
   2042 	wmp = wm_lookup(pa);
   2043 #ifdef DIAGNOSTIC
   2044 	if (wmp == NULL) {
   2045 		printf("\n");
   2046 		panic("wm_attach: impossible");
   2047 	}
   2048 #endif
   2049 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2050 
   2051 	sc->sc_pc = pa->pa_pc;
   2052 	sc->sc_pcitag = pa->pa_tag;
   2053 
   2054 	if (pci_dma64_available(pa)) {
   2055 		aprint_verbose(", 64-bit DMA");
   2056 		sc->sc_dmat = pa->pa_dmat64;
   2057 	} else {
   2058 		aprint_verbose(", 32-bit DMA");
   2059 		sc->sc_dmat = pa->pa_dmat;
   2060 	}
   2061 
   2062 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2063 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2064 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2065 
   2066 	sc->sc_type = wmp->wmp_type;
   2067 
   2068 	/* Set default function pointers */
   2069 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2070 	sc->phy.release = sc->nvm.release = wm_put_null;
   2071 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2072 
   2073 	if (sc->sc_type < WM_T_82543) {
   2074 		if (sc->sc_rev < 2) {
   2075 			aprint_error_dev(sc->sc_dev,
   2076 			    "i82542 must be at least rev. 2\n");
   2077 			return;
   2078 		}
   2079 		if (sc->sc_rev < 3)
   2080 			sc->sc_type = WM_T_82542_2_0;
   2081 	}
   2082 
   2083 	/*
   2084 	 * Disable MSI for Errata:
   2085 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2086 	 *
   2087 	 *  82544: Errata 25
   2088 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2089 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2090 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2091 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2092 	 *
   2093 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2094 	 *
   2095 	 *  82571 & 82572: Errata 63
   2096 	 */
   2097 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2098 	    || (sc->sc_type == WM_T_82572))
   2099 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2100 
   2101 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2102 	    || (sc->sc_type == WM_T_82580)
   2103 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2104 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2105 		sc->sc_flags |= WM_F_NEWQUEUE;
   2106 
   2107 	/* Set device properties (mactype) */
   2108 	dict = device_properties(sc->sc_dev);
   2109 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2110 
   2111 	/*
    2112 	 * Map the device.  All devices support memory-mapped access,
   2113 	 * and it is really required for normal operation.
   2114 	 */
   2115 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2116 	switch (memtype) {
   2117 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2118 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2119 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2120 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2121 		break;
   2122 	default:
   2123 		memh_valid = 0;
   2124 		break;
   2125 	}
   2126 
   2127 	if (memh_valid) {
   2128 		sc->sc_st = memt;
   2129 		sc->sc_sh = memh;
   2130 		sc->sc_ss = memsize;
   2131 	} else {
   2132 		aprint_error_dev(sc->sc_dev,
   2133 		    "unable to map device registers\n");
   2134 		return;
   2135 	}
   2136 
   2137 	/*
   2138 	 * In addition, i82544 and later support I/O mapped indirect
   2139 	 * register access.  It is not desirable (nor supported in
   2140 	 * this driver) to use it for normal operation, though it is
   2141 	 * required to work around bugs in some chip versions.
   2142 	 */
   2143 	switch (sc->sc_type) {
   2144 	case WM_T_82544:
   2145 	case WM_T_82541:
   2146 	case WM_T_82541_2:
   2147 	case WM_T_82547:
   2148 	case WM_T_82547_2:
   2149 		/* First we have to find the I/O BAR. */
   2150 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2151 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2152 			if (memtype == PCI_MAPREG_TYPE_IO)
   2153 				break;
   2154 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2155 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2156 				i += 4;	/* skip high bits, too */
   2157 		}
   2158 		if (i < PCI_MAPREG_END) {
   2159 			/*
    2160 			 * We found PCI_MAPREG_TYPE_IO. Note that the
    2161 			 * 82580 (and newer?) chips have no
    2162 			 * PCI_MAPREG_TYPE_IO; that is not a problem
    2163 			 * because the newer chips don't have this bug.
    2164 			 *
    2165 			 * The i8254x apparently doesn't respond when
    2166 			 * the I/O BAR is 0, which looks as if it has
    2167 			 * not been configured.
   2168 			 */
   2169 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2170 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2171 				aprint_error_dev(sc->sc_dev,
   2172 				    "WARNING: I/O BAR at zero.\n");
   2173 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2174 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2175 			    == 0) {
   2176 				sc->sc_flags |= WM_F_IOH_VALID;
   2177 			} else
   2178 				aprint_error_dev(sc->sc_dev,
   2179 				    "WARNING: unable to map I/O space\n");
   2180 		}
   2181 		break;
   2182 	default:
   2183 		break;
   2184 	}
   2185 
   2186 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2187 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2188 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2189 	if (sc->sc_type < WM_T_82542_2_1)
   2190 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2191 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2192 
   2193 	/* Power up chip */
   2194 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2195 	    && error != EOPNOTSUPP) {
   2196 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2197 		return;
   2198 	}
   2199 
   2200 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2201 	/*
    2202 	 *  Don't use MSI-X if we can use only one queue; this saves
    2203 	 * interrupt resources.
   2204 	 */
   2205 	if (sc->sc_nqueues > 1) {
   2206 		max_type = PCI_INTR_TYPE_MSIX;
   2207 		/*
    2208 		 *  The 82583 has an MSI-X capability in the PCI
    2209 		 * configuration space but doesn't actually support it; at
    2210 		 * least the documentation says nothing about MSI-X.
   2211 		 */
   2212 		counts[PCI_INTR_TYPE_MSIX]
   2213 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2214 	} else {
   2215 		max_type = PCI_INTR_TYPE_MSI;
   2216 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2217 	}
   2218 
   2219 	/* Allocation settings */
   2220 	counts[PCI_INTR_TYPE_MSI] = 1;
   2221 	counts[PCI_INTR_TYPE_INTX] = 1;
   2222 	/* overridden by disable flags */
   2223 	if (wm_disable_msi != 0) {
   2224 		counts[PCI_INTR_TYPE_MSI] = 0;
   2225 		if (wm_disable_msix != 0) {
   2226 			max_type = PCI_INTR_TYPE_INTX;
   2227 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2228 		}
   2229 	} else if (wm_disable_msix != 0) {
   2230 		max_type = PCI_INTR_TYPE_MSI;
   2231 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2232 	}
   2233 
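         /*
          * Interrupt allocation falls back in steps: if MSI-X setup fails,
          * the vectors are released and allocation is retried with MSI;
          * if MSI setup fails, it is retried once more with INTx.
          */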
   2234 alloc_retry:
   2235 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2236 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2237 		return;
   2238 	}
   2239 
   2240 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2241 		error = wm_setup_msix(sc);
   2242 		if (error) {
   2243 			pci_intr_release(pc, sc->sc_intrs,
   2244 			    counts[PCI_INTR_TYPE_MSIX]);
   2245 
   2246 			/* Setup for MSI: Disable MSI-X */
   2247 			max_type = PCI_INTR_TYPE_MSI;
   2248 			counts[PCI_INTR_TYPE_MSI] = 1;
   2249 			counts[PCI_INTR_TYPE_INTX] = 1;
   2250 			goto alloc_retry;
   2251 		}
   2252 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2253 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2254 		error = wm_setup_legacy(sc);
   2255 		if (error) {
   2256 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2257 			    counts[PCI_INTR_TYPE_MSI]);
   2258 
   2259 			/* The next try is for INTx: Disable MSI */
   2260 			max_type = PCI_INTR_TYPE_INTX;
   2261 			counts[PCI_INTR_TYPE_INTX] = 1;
   2262 			goto alloc_retry;
   2263 		}
   2264 	} else {
   2265 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2266 		error = wm_setup_legacy(sc);
   2267 		if (error) {
   2268 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2269 			    counts[PCI_INTR_TYPE_INTX]);
   2270 			return;
   2271 		}
   2272 	}
   2273 
   2274 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2275 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2276 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2277 	    WQ_PERCPU | WQ_MPSAFE);
   2278 	if (error) {
   2279 		aprint_error_dev(sc->sc_dev,
   2280 		    "unable to create TxRx workqueue\n");
   2281 		goto out;
   2282 	}
   2283 
   2284 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2285 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2286 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2287 	    WQ_MPSAFE);
   2288 	if (error) {
   2289 		workqueue_destroy(sc->sc_queue_wq);
   2290 		aprint_error_dev(sc->sc_dev,
   2291 		    "unable to create reset workqueue\n");
   2292 		goto out;
   2293 	}
   2294 
   2295 	/*
   2296 	 * Check the function ID (unit number of the chip).
   2297 	 */
   2298 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2299 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2300 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2301 	    || (sc->sc_type == WM_T_82580)
   2302 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2303 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2304 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2305 	else
   2306 		sc->sc_funcid = 0;
   2307 
   2308 	/*
   2309 	 * Determine a few things about the bus we're connected to.
   2310 	 */
   2311 	if (sc->sc_type < WM_T_82543) {
   2312 		/* We don't really know the bus characteristics here. */
   2313 		sc->sc_bus_speed = 33;
   2314 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2315 		/*
   2316 		 * CSA (Communication Streaming Architecture) is about as fast
    2317 		 * as a 32-bit 66MHz PCI bus.
   2318 		 */
   2319 		sc->sc_flags |= WM_F_CSA;
   2320 		sc->sc_bus_speed = 66;
   2321 		aprint_verbose_dev(sc->sc_dev,
   2322 		    "Communication Streaming Architecture\n");
   2323 		if (sc->sc_type == WM_T_82547) {
   2324 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2325 			callout_setfunc(&sc->sc_txfifo_ch,
   2326 			    wm_82547_txfifo_stall, sc);
   2327 			aprint_verbose_dev(sc->sc_dev,
   2328 			    "using 82547 Tx FIFO stall work-around\n");
   2329 		}
   2330 	} else if (sc->sc_type >= WM_T_82571) {
   2331 		sc->sc_flags |= WM_F_PCIE;
   2332 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2333 		    && (sc->sc_type != WM_T_ICH10)
   2334 		    && (sc->sc_type != WM_T_PCH)
   2335 		    && (sc->sc_type != WM_T_PCH2)
   2336 		    && (sc->sc_type != WM_T_PCH_LPT)
   2337 		    && (sc->sc_type != WM_T_PCH_SPT)
   2338 		    && (sc->sc_type != WM_T_PCH_CNP)
   2339 		    && (sc->sc_type != WM_T_PCH_TGP)) {
   2340 			/* ICH* and PCH* have no PCIe capability registers */
   2341 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2342 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2343 				NULL) == 0)
   2344 				aprint_error_dev(sc->sc_dev,
   2345 				    "unable to find PCIe capability\n");
   2346 		}
   2347 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2348 	} else {
   2349 		reg = CSR_READ(sc, WMREG_STATUS);
   2350 		if (reg & STATUS_BUS64)
   2351 			sc->sc_flags |= WM_F_BUS64;
   2352 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2353 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2354 
   2355 			sc->sc_flags |= WM_F_PCIX;
   2356 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2357 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2358 				aprint_error_dev(sc->sc_dev,
   2359 				    "unable to find PCIX capability\n");
   2360 			else if (sc->sc_type != WM_T_82545_3 &&
   2361 			    sc->sc_type != WM_T_82546_3) {
   2362 				/*
   2363 				 * Work around a problem caused by the BIOS
   2364 				 * setting the max memory read byte count
   2365 				 * incorrectly.
   2366 				 */
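         				/*
         				 * Illustrative example: if the BIOS
         				 * left MMRBC at 4096 bytes (bytecnt
         				 * == 3) but the device caps out at
         				 * 2048 bytes (maxb == 2), the command
         				 * register is rewritten for 2048.
         				 */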
   2367 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2368 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2369 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2370 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2371 
   2372 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2373 				    PCIX_CMD_BYTECNT_SHIFT;
   2374 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2375 				    PCIX_STATUS_MAXB_SHIFT;
   2376 				if (bytecnt > maxb) {
   2377 					aprint_verbose_dev(sc->sc_dev,
   2378 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2379 					    512 << bytecnt, 512 << maxb);
   2380 					pcix_cmd = (pcix_cmd &
   2381 					    ~PCIX_CMD_BYTECNT_MASK) |
   2382 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2383 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2384 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2385 					    pcix_cmd);
   2386 				}
   2387 			}
   2388 		}
   2389 		/*
   2390 		 * The quad port adapter is special; it has a PCIX-PCIX
   2391 		 * bridge on the board, and can run the secondary bus at
   2392 		 * a higher speed.
   2393 		 */
   2394 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2395 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2396 								      : 66;
   2397 		} else if (sc->sc_flags & WM_F_PCIX) {
   2398 			switch (reg & STATUS_PCIXSPD_MASK) {
   2399 			case STATUS_PCIXSPD_50_66:
   2400 				sc->sc_bus_speed = 66;
   2401 				break;
   2402 			case STATUS_PCIXSPD_66_100:
   2403 				sc->sc_bus_speed = 100;
   2404 				break;
   2405 			case STATUS_PCIXSPD_100_133:
   2406 				sc->sc_bus_speed = 133;
   2407 				break;
   2408 			default:
   2409 				aprint_error_dev(sc->sc_dev,
   2410 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2411 				    reg & STATUS_PCIXSPD_MASK);
   2412 				sc->sc_bus_speed = 66;
   2413 				break;
   2414 			}
   2415 		} else
   2416 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2417 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2418 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2419 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2420 	}
   2421 
   2422 	/* clear interesting stat counters */
   2423 	CSR_READ(sc, WMREG_COLC);
   2424 	CSR_READ(sc, WMREG_RXERRC);
   2425 
   2426 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2427 	    || (sc->sc_type >= WM_T_ICH8))
   2428 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2429 	if (sc->sc_type >= WM_T_ICH8)
   2430 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2431 
   2432 	/* Set PHY, NVM mutex related stuff */
   2433 	switch (sc->sc_type) {
   2434 	case WM_T_82542_2_0:
   2435 	case WM_T_82542_2_1:
   2436 	case WM_T_82543:
   2437 	case WM_T_82544:
   2438 		/* Microwire */
   2439 		sc->nvm.read = wm_nvm_read_uwire;
   2440 		sc->sc_nvm_wordsize = 64;
   2441 		sc->sc_nvm_addrbits = 6;
   2442 		break;
   2443 	case WM_T_82540:
   2444 	case WM_T_82545:
   2445 	case WM_T_82545_3:
   2446 	case WM_T_82546:
   2447 	case WM_T_82546_3:
   2448 		/* Microwire */
   2449 		sc->nvm.read = wm_nvm_read_uwire;
   2450 		reg = CSR_READ(sc, WMREG_EECD);
   2451 		if (reg & EECD_EE_SIZE) {
   2452 			sc->sc_nvm_wordsize = 256;
   2453 			sc->sc_nvm_addrbits = 8;
   2454 		} else {
   2455 			sc->sc_nvm_wordsize = 64;
   2456 			sc->sc_nvm_addrbits = 6;
   2457 		}
   2458 		sc->sc_flags |= WM_F_LOCK_EECD;
   2459 		sc->nvm.acquire = wm_get_eecd;
   2460 		sc->nvm.release = wm_put_eecd;
   2461 		break;
   2462 	case WM_T_82541:
   2463 	case WM_T_82541_2:
   2464 	case WM_T_82547:
   2465 	case WM_T_82547_2:
   2466 		reg = CSR_READ(sc, WMREG_EECD);
   2467 		/*
    2468 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2469 		 * the 8254[17], so set flags and functions before calling it.
   2470 		 */
   2471 		sc->sc_flags |= WM_F_LOCK_EECD;
   2472 		sc->nvm.acquire = wm_get_eecd;
   2473 		sc->nvm.release = wm_put_eecd;
   2474 		if (reg & EECD_EE_TYPE) {
   2475 			/* SPI */
   2476 			sc->nvm.read = wm_nvm_read_spi;
   2477 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2478 			wm_nvm_set_addrbits_size_eecd(sc);
   2479 		} else {
   2480 			/* Microwire */
   2481 			sc->nvm.read = wm_nvm_read_uwire;
   2482 			if ((reg & EECD_EE_ABITS) != 0) {
   2483 				sc->sc_nvm_wordsize = 256;
   2484 				sc->sc_nvm_addrbits = 8;
   2485 			} else {
   2486 				sc->sc_nvm_wordsize = 64;
   2487 				sc->sc_nvm_addrbits = 6;
   2488 			}
   2489 		}
   2490 		break;
   2491 	case WM_T_82571:
   2492 	case WM_T_82572:
   2493 		/* SPI */
   2494 		sc->nvm.read = wm_nvm_read_eerd;
    2495 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2496 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2497 		wm_nvm_set_addrbits_size_eecd(sc);
   2498 		sc->phy.acquire = wm_get_swsm_semaphore;
   2499 		sc->phy.release = wm_put_swsm_semaphore;
   2500 		sc->nvm.acquire = wm_get_nvm_82571;
   2501 		sc->nvm.release = wm_put_nvm_82571;
   2502 		break;
   2503 	case WM_T_82573:
   2504 	case WM_T_82574:
   2505 	case WM_T_82583:
   2506 		sc->nvm.read = wm_nvm_read_eerd;
    2507 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2508 		if (sc->sc_type == WM_T_82573) {
   2509 			sc->phy.acquire = wm_get_swsm_semaphore;
   2510 			sc->phy.release = wm_put_swsm_semaphore;
   2511 			sc->nvm.acquire = wm_get_nvm_82571;
   2512 			sc->nvm.release = wm_put_nvm_82571;
   2513 		} else {
   2514 			/* Both PHY and NVM use the same semaphore. */
   2515 			sc->phy.acquire = sc->nvm.acquire
   2516 			    = wm_get_swfwhw_semaphore;
   2517 			sc->phy.release = sc->nvm.release
   2518 			    = wm_put_swfwhw_semaphore;
   2519 		}
   2520 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2521 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2522 			sc->sc_nvm_wordsize = 2048;
   2523 		} else {
   2524 			/* SPI */
   2525 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2526 			wm_nvm_set_addrbits_size_eecd(sc);
   2527 		}
   2528 		break;
   2529 	case WM_T_82575:
   2530 	case WM_T_82576:
   2531 	case WM_T_82580:
   2532 	case WM_T_I350:
   2533 	case WM_T_I354:
   2534 	case WM_T_80003:
   2535 		/* SPI */
   2536 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2537 		wm_nvm_set_addrbits_size_eecd(sc);
   2538 		if ((sc->sc_type == WM_T_80003)
   2539 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2540 			sc->nvm.read = wm_nvm_read_eerd;
   2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2542 		} else {
   2543 			sc->nvm.read = wm_nvm_read_spi;
   2544 			sc->sc_flags |= WM_F_LOCK_EECD;
   2545 		}
   2546 		sc->phy.acquire = wm_get_phy_82575;
   2547 		sc->phy.release = wm_put_phy_82575;
   2548 		sc->nvm.acquire = wm_get_nvm_80003;
   2549 		sc->nvm.release = wm_put_nvm_80003;
   2550 		break;
   2551 	case WM_T_ICH8:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH:
   2555 	case WM_T_PCH2:
   2556 	case WM_T_PCH_LPT:
   2557 		sc->nvm.read = wm_nvm_read_ich8;
   2558 		/* FLASH */
   2559 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2560 		sc->sc_nvm_wordsize = 2048;
   2561 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2562 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2563 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2564 			aprint_error_dev(sc->sc_dev,
   2565 			    "can't map FLASH registers\n");
   2566 			goto out;
   2567 		}
   2568 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2569 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2570 		    ICH_FLASH_SECTOR_SIZE;
   2571 		sc->sc_ich8_flash_bank_size =
   2572 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2573 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2574 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2575 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
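         		/*
         		 * Illustrative example (assuming 4KB flash sectors):
         		 * GFPREG == 0x00010000 encodes base sector 0 and last
         		 * sector 1, giving a flash base of 0 and, with the
         		 * two-sector region split across two banks, 2048
         		 * words per bank.
         		 */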
   2576 		sc->sc_flashreg_offset = 0;
   2577 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2578 		sc->phy.release = wm_put_swflag_ich8lan;
   2579 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2580 		sc->nvm.release = wm_put_nvm_ich8lan;
   2581 		break;
   2582 	case WM_T_PCH_SPT:
   2583 	case WM_T_PCH_CNP:
   2584 	case WM_T_PCH_TGP:
   2585 		sc->nvm.read = wm_nvm_read_spt;
   2586 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2587 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2588 		sc->sc_flasht = sc->sc_st;
   2589 		sc->sc_flashh = sc->sc_sh;
   2590 		sc->sc_ich8_flash_base = 0;
   2591 		sc->sc_nvm_wordsize =
   2592 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2593 		    * NVM_SIZE_MULTIPLIER;
    2594 		/* It is the size in bytes; we want words */
   2595 		sc->sc_nvm_wordsize /= 2;
   2596 		/* Assume 2 banks */
   2597 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2598 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2599 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2600 		sc->phy.release = wm_put_swflag_ich8lan;
   2601 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2602 		sc->nvm.release = wm_put_nvm_ich8lan;
   2603 		break;
   2604 	case WM_T_I210:
   2605 	case WM_T_I211:
    2606 		/* Allow one clear of the SW semaphore on I210 and newer */
   2607 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2608 		if (wm_nvm_flash_presence_i210(sc)) {
   2609 			sc->nvm.read = wm_nvm_read_eerd;
   2610 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2611 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2612 			wm_nvm_set_addrbits_size_eecd(sc);
   2613 		} else {
   2614 			sc->nvm.read = wm_nvm_read_invm;
   2615 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2616 			sc->sc_nvm_wordsize = INVM_SIZE;
   2617 		}
   2618 		sc->phy.acquire = wm_get_phy_82575;
   2619 		sc->phy.release = wm_put_phy_82575;
   2620 		sc->nvm.acquire = wm_get_nvm_80003;
   2621 		sc->nvm.release = wm_put_nvm_80003;
   2622 		break;
   2623 	default:
   2624 		break;
   2625 	}
   2626 
   2627 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2628 	switch (sc->sc_type) {
   2629 	case WM_T_82571:
   2630 	case WM_T_82572:
   2631 		reg = CSR_READ(sc, WMREG_SWSM2);
   2632 		if ((reg & SWSM2_LOCK) == 0) {
   2633 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2634 			force_clear_smbi = true;
   2635 		} else
   2636 			force_clear_smbi = false;
   2637 		break;
   2638 	case WM_T_82573:
   2639 	case WM_T_82574:
   2640 	case WM_T_82583:
   2641 		force_clear_smbi = true;
   2642 		break;
   2643 	default:
   2644 		force_clear_smbi = false;
   2645 		break;
   2646 	}
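         	/*
         	 * If the SMBI semaphore is already held this early in attach,
         	 * firmware presumably failed to release it; clear it once so
         	 * the NVM and PHY accesses below can proceed.
         	 */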
   2647 	if (force_clear_smbi) {
   2648 		reg = CSR_READ(sc, WMREG_SWSM);
   2649 		if ((reg & SWSM_SMBI) != 0)
   2650 			aprint_error_dev(sc->sc_dev,
   2651 			    "Please update the Bootagent\n");
   2652 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2653 	}
   2654 
    2655 	/*
    2656 	 * Defer printing the EEPROM type until after verifying the
    2657 	 * checksum.  This allows the EEPROM type to be printed
    2658 	 * correctly in the case that no EEPROM is attached.
    2659 	 */
   2660 	/*
   2661 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2662 	 * this for later, so we can fail future reads from the EEPROM.
   2663 	 */
   2664 	if (wm_nvm_validate_checksum(sc)) {
    2665 		/*
    2666 		 * Check a second time; some PCI-e parts fail the first
    2667 		 * check because the link is still in a sleep state.
    2668 		 */
   2669 		if (wm_nvm_validate_checksum(sc))
   2670 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2671 	}
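         	/*
         	 * The check itself is the usual Intel NVM scheme; a minimal
         	 * sketch (wm_nvm_validate_checksum() is authoritative):
         	 *
         	 *	uint16_t sum = 0, word;
         	 *	for (int i = 0; i < 0x40; i++) {
         	 *		if (wm_nvm_read(sc, i, 1, &word) != 0)
         	 *			return 1;
         	 *		sum += word;
         	 *	}
         	 *	return (sum == 0xbaba) ? 0 : 1;
         	 */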
   2672 
   2673 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2674 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2675 	else {
   2676 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2677 		    sc->sc_nvm_wordsize);
   2678 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2679 			aprint_verbose("iNVM");
   2680 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2681 			aprint_verbose("FLASH(HW)");
   2682 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2683 			aprint_verbose("FLASH");
   2684 		else {
   2685 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2686 				eetype = "SPI";
   2687 			else
   2688 				eetype = "MicroWire";
   2689 			aprint_verbose("(%d address bits) %s EEPROM",
   2690 			    sc->sc_nvm_addrbits, eetype);
   2691 		}
   2692 	}
   2693 	wm_nvm_version(sc);
   2694 	aprint_verbose("\n");
   2695 
    2696 	/*
    2697 	 * XXX The first call to wm_gmii_setup_phytype(); the result
    2698 	 * may be incorrect.
    2699 	 */
   2700 	wm_gmii_setup_phytype(sc, 0, 0);
   2701 
   2702 	/* Check for WM_F_WOL on some chips before wm_reset() */
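         	/*
         	 * On ICH/PCH parts the APME setting is reflected in the WUC
         	 * register rather than a CFG3 NVM word, and presumably would
         	 * not survive wm_reset(), so latch WM_F_WOL here.
         	 */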
   2703 	switch (sc->sc_type) {
   2704 	case WM_T_ICH8:
   2705 	case WM_T_ICH9:
   2706 	case WM_T_ICH10:
   2707 	case WM_T_PCH:
   2708 	case WM_T_PCH2:
   2709 	case WM_T_PCH_LPT:
   2710 	case WM_T_PCH_SPT:
   2711 	case WM_T_PCH_CNP:
   2712 	case WM_T_PCH_TGP:
   2713 		apme_mask = WUC_APME;
   2714 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2715 		if ((eeprom_data & apme_mask) != 0)
   2716 			sc->sc_flags |= WM_F_WOL;
   2717 		break;
   2718 	default:
   2719 		break;
   2720 	}
   2721 
   2722 	/* Reset the chip to a known state. */
   2723 	wm_reset(sc);
   2724 
   2725 	/* sc->sc_pba is set in wm_reset(). */
   2726 	aprint_verbose_dev(sc->sc_dev, "RX packet buffer size: %uKB\n",
   2727 	    sc->sc_pba);
   2728 
   2729 	/*
   2730 	 * Check for I21[01] PLL workaround.
   2731 	 *
   2732 	 * Three cases:
   2733 	 * a) Chip is I211.
   2734 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2735 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2736 	 */
   2737 	if (sc->sc_type == WM_T_I211)
   2738 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2739 	if (sc->sc_type == WM_T_I210) {
   2740 		if (!wm_nvm_flash_presence_i210(sc))
   2741 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2742 		else if ((sc->sc_nvm_ver_major < 3)
   2743 		    || ((sc->sc_nvm_ver_major == 3)
   2744 			&& (sc->sc_nvm_ver_minor < 25))) {
   2745 			aprint_verbose_dev(sc->sc_dev,
   2746 			    "ROM image version %d.%d is older than 3.25\n",
   2747 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2748 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2749 		}
   2750 	}
   2751 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2752 		wm_pll_workaround_i210(sc);
   2753 
   2754 	wm_get_wakeup(sc);
   2755 
   2756 	/* Non-AMT based hardware can now take control from firmware */
   2757 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2758 		wm_get_hw_control(sc);
   2759 
    2760 	/*
    2761 	 * Read the Ethernet address from device properties if present,
    2762 	 * otherwise from the EEPROM.
    2763 	 */
   2764 	ea = prop_dictionary_get(dict, "mac-address");
   2765 	if (ea != NULL) {
   2766 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2767 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2768 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2769 	} else {
   2770 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2771 			aprint_error_dev(sc->sc_dev,
   2772 			    "unable to read Ethernet address\n");
   2773 			goto out;
   2774 		}
   2775 	}
   2776 
   2777 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2778 	    ether_sprintf(enaddr));
   2779 
   2780 	/*
   2781 	 * Read the config info from the EEPROM, and set up various
   2782 	 * bits in the control registers based on their contents.
   2783 	 */
   2784 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2785 	if (pn != NULL) {
   2786 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2787 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2788 	} else {
   2789 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2790 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2791 			goto out;
   2792 		}
   2793 	}
   2794 
   2795 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2796 	if (pn != NULL) {
   2797 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2798 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2799 	} else {
   2800 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2801 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2802 			goto out;
   2803 		}
   2804 	}
   2805 
   2806 	/* check for WM_F_WOL */
   2807 	switch (sc->sc_type) {
   2808 	case WM_T_82542_2_0:
   2809 	case WM_T_82542_2_1:
   2810 	case WM_T_82543:
   2811 		/* dummy? */
   2812 		eeprom_data = 0;
   2813 		apme_mask = NVM_CFG3_APME;
   2814 		break;
   2815 	case WM_T_82544:
   2816 		apme_mask = NVM_CFG2_82544_APM_EN;
   2817 		eeprom_data = cfg2;
   2818 		break;
   2819 	case WM_T_82546:
   2820 	case WM_T_82546_3:
   2821 	case WM_T_82571:
   2822 	case WM_T_82572:
   2823 	case WM_T_82573:
   2824 	case WM_T_82574:
   2825 	case WM_T_82583:
   2826 	case WM_T_80003:
   2827 	case WM_T_82575:
   2828 	case WM_T_82576:
   2829 		apme_mask = NVM_CFG3_APME;
   2830 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2831 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2832 		break;
   2833 	case WM_T_82580:
   2834 	case WM_T_I350:
   2835 	case WM_T_I354:
   2836 	case WM_T_I210:
   2837 	case WM_T_I211:
   2838 		apme_mask = NVM_CFG3_APME;
   2839 		wm_nvm_read(sc,
   2840 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2841 		    1, &eeprom_data);
   2842 		break;
   2843 	case WM_T_ICH8:
   2844 	case WM_T_ICH9:
   2845 	case WM_T_ICH10:
   2846 	case WM_T_PCH:
   2847 	case WM_T_PCH2:
   2848 	case WM_T_PCH_LPT:
   2849 	case WM_T_PCH_SPT:
   2850 	case WM_T_PCH_CNP:
   2851 	case WM_T_PCH_TGP:
    2852 		/* Already checked before wm_reset(). */
   2853 		apme_mask = eeprom_data = 0;
   2854 		break;
   2855 	default: /* XXX 82540 */
   2856 		apme_mask = NVM_CFG3_APME;
   2857 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2858 		break;
   2859 	}
    2860 	/* Set WM_F_WOL now that eeprom_data and apme_mask are known. */
   2861 	if ((eeprom_data & apme_mask) != 0)
   2862 		sc->sc_flags |= WM_F_WOL;
   2863 
    2864 	/*
    2865 	 * We have the EEPROM settings; now apply the special cases
    2866 	 * where the EEPROM may be wrong or the board won't support
    2867 	 * wake on LAN on a particular port.
    2868 	 */
   2869 	switch (sc->sc_pcidevid) {
   2870 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2871 		sc->sc_flags &= ~WM_F_WOL;
   2872 		break;
   2873 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2874 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2875 		/* Wake events are only supported on port A of dual-fiber
    2876 		 * boards, regardless of the EEPROM setting. */
   2877 		if (sc->sc_funcid == 1)
   2878 			sc->sc_flags &= ~WM_F_WOL;
   2879 		break;
   2880 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2881 		/* If quad port adapter, disable WoL on all but port A */
   2882 		if (sc->sc_funcid != 0)
   2883 			sc->sc_flags &= ~WM_F_WOL;
   2884 		break;
   2885 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2886 		/* Wake events are only supported on port A of dual-fiber
    2887 		 * boards, regardless of the EEPROM setting. */
   2888 		if (sc->sc_funcid == 1)
   2889 			sc->sc_flags &= ~WM_F_WOL;
   2890 		break;
   2891 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2892 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2893 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2894 		/* If quad port adapter, disable WoL on all but port A */
   2895 		if (sc->sc_funcid != 0)
   2896 			sc->sc_flags &= ~WM_F_WOL;
   2897 		break;
   2898 	}
   2899 
   2900 	if (sc->sc_type >= WM_T_82575) {
   2901 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2902 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2903 			    nvmword);
   2904 			if ((sc->sc_type == WM_T_82575) ||
   2905 			    (sc->sc_type == WM_T_82576)) {
   2906 				/* Check NVM for autonegotiation */
   2907 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2908 				    != 0)
   2909 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2910 			}
   2911 			if ((sc->sc_type == WM_T_82575) ||
   2912 			    (sc->sc_type == WM_T_I350)) {
   2913 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2914 					sc->sc_flags |= WM_F_MAS;
   2915 			}
   2916 		}
   2917 	}
   2918 
    2919 	/*
    2920 	 * XXX Some multi-port cards need special handling to disable
    2921 	 * a particular port.
    2922 	 */
   2923 
   2924 	if (sc->sc_type >= WM_T_82544) {
   2925 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2926 		if (pn != NULL) {
   2927 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2928 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2929 		} else {
   2930 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2931 				aprint_error_dev(sc->sc_dev,
   2932 				    "unable to read SWDPIN\n");
   2933 				goto out;
   2934 			}
   2935 		}
   2936 	}
   2937 
   2938 	if (cfg1 & NVM_CFG1_ILOS)
   2939 		sc->sc_ctrl |= CTRL_ILOS;
   2940 
    2941 	/*
    2942 	 * XXX
    2943 	 * This code isn't correct because pins 2 and 3 are located at
    2944 	 * different positions on newer chips.  Check all the datasheets.
    2945 	 *
    2946 	 * Until this is resolved, only do it on chips up to the 82580.
    2947 	 */
   2948 	if (sc->sc_type <= WM_T_82580) {
   2949 		if (sc->sc_type >= WM_T_82544) {
   2950 			sc->sc_ctrl |=
   2951 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2952 			    CTRL_SWDPIO_SHIFT;
   2953 			sc->sc_ctrl |=
   2954 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2955 			    CTRL_SWDPINS_SHIFT;
   2956 		} else {
   2957 			sc->sc_ctrl |=
   2958 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2959 			    CTRL_SWDPIO_SHIFT;
   2960 		}
   2961 	}
   2962 
   2963 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2964 		wm_nvm_read(sc,
   2965 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2966 		    1, &nvmword);
   2967 		if (nvmword & NVM_CFG3_ILOS)
   2968 			sc->sc_ctrl |= CTRL_ILOS;
   2969 	}
   2970 
   2971 #if 0
   2972 	if (sc->sc_type >= WM_T_82544) {
   2973 		if (cfg1 & NVM_CFG1_IPS0)
   2974 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2975 		if (cfg1 & NVM_CFG1_IPS1)
   2976 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2977 		sc->sc_ctrl_ext |=
   2978 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2979 		    CTRL_EXT_SWDPIO_SHIFT;
   2980 		sc->sc_ctrl_ext |=
   2981 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2982 		    CTRL_EXT_SWDPINS_SHIFT;
   2983 	} else {
   2984 		sc->sc_ctrl_ext |=
   2985 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2986 		    CTRL_EXT_SWDPIO_SHIFT;
   2987 	}
   2988 #endif
   2989 
   2990 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2991 #if 0
   2992 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2993 #endif
   2994 
   2995 	if (sc->sc_type == WM_T_PCH) {
   2996 		uint16_t val;
   2997 
   2998 		/* Save the NVM K1 bit setting */
   2999 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   3000 
   3001 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   3002 			sc->sc_nvm_k1_enabled = 1;
   3003 		else
   3004 			sc->sc_nvm_k1_enabled = 0;
   3005 	}
   3006 
   3007 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
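         	/*
         	 * For the 82575 and later variants handled below,
         	 * CTRL_EXT.LINK_MODE selects the MAC/PHY interconnect:
         	 * internal PHY (GMII/copper), 1000BASE-KX, SGMII or PCIe
         	 * SerDes.  The CTRL_EXT_LINK_MODE_* cases below correspond
         	 * to those encodings.
         	 */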
   3008 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   3009 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   3010 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   3011 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   3012 	    || sc->sc_type == WM_T_PCH_TGP
   3013 	    || sc->sc_type == WM_T_82573
   3014 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   3015 		/* Copper only */
    3016 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    3017 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    3018 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    3019 	    || (sc->sc_type == WM_T_I211)) {
   3020 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3021 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   3022 		switch (link_mode) {
   3023 		case CTRL_EXT_LINK_MODE_1000KX:
   3024 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   3025 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3026 			break;
   3027 		case CTRL_EXT_LINK_MODE_SGMII:
   3028 			if (wm_sgmii_uses_mdio(sc)) {
   3029 				aprint_normal_dev(sc->sc_dev,
   3030 				    "SGMII(MDIO)\n");
   3031 				sc->sc_flags |= WM_F_SGMII;
   3032 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3033 				break;
   3034 			}
   3035 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3036 			/*FALLTHROUGH*/
   3037 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3038 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3039 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3040 				if (link_mode
   3041 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3042 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3043 					sc->sc_flags |= WM_F_SGMII;
   3044 					aprint_verbose_dev(sc->sc_dev,
   3045 					    "SGMII\n");
   3046 				} else {
   3047 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3048 					aprint_verbose_dev(sc->sc_dev,
   3049 					    "SERDES\n");
   3050 				}
   3051 				break;
   3052 			}
   3053 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3054 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3055 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3056 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3057 				sc->sc_flags |= WM_F_SGMII;
   3058 			}
   3059 			/* Do not change link mode for 100BaseFX */
   3060 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3061 				break;
   3062 
   3063 			/* Change current link mode setting */
   3064 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3065 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3066 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3067 			else
   3068 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3070 			break;
   3071 		case CTRL_EXT_LINK_MODE_GMII:
   3072 		default:
   3073 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3074 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3075 			break;
   3076 		}
   3077 
    3078 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3079 			reg |= CTRL_EXT_I2C_ENA;
    3080 		else
    3081 			reg &= ~CTRL_EXT_I2C_ENA;
   3083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3084 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3085 			if (!wm_sgmii_uses_mdio(sc))
   3086 				wm_gmii_setup_phytype(sc, 0, 0);
   3087 			wm_reset_mdicnfg_82580(sc);
   3088 		}
   3089 	} else if (sc->sc_type < WM_T_82543 ||
   3090 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3091 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3092 			aprint_error_dev(sc->sc_dev,
   3093 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3094 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3095 		}
   3096 	} else {
   3097 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3098 			aprint_error_dev(sc->sc_dev,
   3099 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3100 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3101 		}
   3102 	}
   3103 
   3104 	if (sc->sc_type >= WM_T_PCH2)
   3105 		sc->sc_flags |= WM_F_EEE;
   3106 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3107 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3108 		/* XXX: Need special handling for I354. (not yet) */
   3109 		if (sc->sc_type != WM_T_I354)
   3110 			sc->sc_flags |= WM_F_EEE;
   3111 	}
   3112 
    3113 	/*
    3114 	 * The I350 has a bug where it always strips the CRC whether asked
    3115 	 * to or not, so ask for a stripped CRC here and cope in wm_rxeof().
    3116 	 */
   3117 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3118 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3119 		sc->sc_flags |= WM_F_CRC_STRIP;
   3120 
    3121 	/*
    3122 	 * Workaround for some chips that must delay sending LINK_STATE_UP:
    3123 	 * some systems can't send packets immediately after link-up.
    3124 	 * See also wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
    3125 	 */
   3126 	switch (sc->sc_type) {
   3127 	case WM_T_I350:
   3128 	case WM_T_I354:
   3129 	case WM_T_I210:
   3130 	case WM_T_I211:
   3131 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3132 			sc->sc_flags |= WM_F_DELAY_LINKUP;
   3133 		break;
   3134 	default:
   3135 		break;
   3136 	}
   3137 
   3138 	/* Set device properties (macflags) */
   3139 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3140 
   3141 	if (sc->sc_flags != 0) {
   3142 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3143 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3144 	}
   3145 
   3146 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3147 
   3148 	/* Initialize the media structures accordingly. */
   3149 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3150 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3151 	else
   3152 		wm_tbi_mediainit(sc); /* All others */
   3153 
   3154 	ifp = &sc->sc_ethercom.ec_if;
   3155 	xname = device_xname(sc->sc_dev);
   3156 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3157 	ifp->if_softc = sc;
   3158 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3159 	ifp->if_extflags = IFEF_MPSAFE;
   3160 	ifp->if_ioctl = wm_ioctl;
   3161 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3162 		ifp->if_start = wm_nq_start;
    3163 		/*
    3164 		 * When there is only one CPU and the controller can use
    3165 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3166 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3167 		 * the other for link status changes.
    3168 		 * In this situation, wm_nq_transmit() is disadvantageous
    3169 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    3170 		 */
   3171 		if (wm_is_using_multiqueue(sc))
   3172 			ifp->if_transmit = wm_nq_transmit;
   3173 	} else {
   3174 		ifp->if_start = wm_start;
   3175 		/*
   3176 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3177 		 * described above.
   3178 		 */
   3179 		if (wm_is_using_multiqueue(sc))
   3180 			ifp->if_transmit = wm_transmit;
   3181 	}
    3182 	/* wm(4) does not use ifp->if_watchdog; wm_tick() is the watchdog. */
   3183 	ifp->if_init = wm_init;
   3184 	ifp->if_stop = wm_stop;
   3185 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3186 	IFQ_SET_READY(&ifp->if_snd);
   3187 
   3188 	/* Check for jumbo frame */
   3189 	switch (sc->sc_type) {
   3190 	case WM_T_82573:
   3191 		/* XXX limited to 9234 if ASPM is disabled */
   3192 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3193 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3194 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3195 		break;
   3196 	case WM_T_82571:
   3197 	case WM_T_82572:
   3198 	case WM_T_82574:
   3199 	case WM_T_82583:
   3200 	case WM_T_82575:
   3201 	case WM_T_82576:
   3202 	case WM_T_82580:
   3203 	case WM_T_I350:
   3204 	case WM_T_I354:
   3205 	case WM_T_I210:
   3206 	case WM_T_I211:
   3207 	case WM_T_80003:
   3208 	case WM_T_ICH9:
   3209 	case WM_T_ICH10:
   3210 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3211 	case WM_T_PCH_LPT:
   3212 	case WM_T_PCH_SPT:
   3213 	case WM_T_PCH_CNP:
   3214 	case WM_T_PCH_TGP:
   3215 		/* XXX limited to 9234 */
   3216 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3217 		break;
   3218 	case WM_T_PCH:
   3219 		/* XXX limited to 4096 */
   3220 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3221 		break;
   3222 	case WM_T_82542_2_0:
   3223 	case WM_T_82542_2_1:
   3224 	case WM_T_ICH8:
   3225 		/* No support for jumbo frame */
   3226 		break;
   3227 	default:
   3228 		/* ETHER_MAX_LEN_JUMBO */
   3229 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3230 		break;
   3231 	}
   3232 
    3233 	/* If we're an i82543 or greater, we can support VLANs. */
   3234 	if (sc->sc_type >= WM_T_82543) {
   3235 		sc->sc_ethercom.ec_capabilities |=
   3236 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3237 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3238 	}
   3239 
   3240 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3241 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3242 
    3243 	/*
    3244 	 * We can perform TCPv4 and UDPv4 checksum offload in both
    3245 	 * directions (plus TCPv6/UDPv6 on Tx).  Only on i82543 and later.
    3246 	 */
   3247 	if (sc->sc_type >= WM_T_82543) {
   3248 		ifp->if_capabilities |=
   3249 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3250 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3251 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3252 		    IFCAP_CSUM_TCPv6_Tx |
   3253 		    IFCAP_CSUM_UDPv6_Tx;
   3254 	}
   3255 
   3256 	/*
   3257 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3258 	 *
   3259 	 *	82541GI (8086:1076) ... no
   3260 	 *	82572EI (8086:10b9) ... yes
   3261 	 */
   3262 	if (sc->sc_type >= WM_T_82571) {
   3263 		ifp->if_capabilities |=
   3264 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3265 	}
   3266 
   3267 	/*
    3268 	 * If we're an i82544 or greater (except i82547), we can do
   3269 	 * TCP segmentation offload.
   3270 	 */
   3271 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3272 		ifp->if_capabilities |= IFCAP_TSOv4;
   3273 
   3274 	if (sc->sc_type >= WM_T_82571)
   3275 		ifp->if_capabilities |= IFCAP_TSOv6;
   3276 
   3277 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3278 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3279 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3280 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3281 
   3282 	/* Attach the interface. */
   3283 	if_initialize(ifp);
   3284 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3285 	ether_ifattach(ifp, enaddr);
   3286 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3287 	if_register(ifp);
   3288 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3289 	    RND_FLAG_DEFAULT);
   3290 
   3291 #ifdef WM_EVENT_COUNTERS
   3292 	/* Attach event counters. */
   3293 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3294 	    NULL, xname, "linkintr");
   3295 
   3296 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "CRC Error");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Symbol Error");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Missed Packets");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Collision");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Sequence Error");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Receive Length Error");
   3308 
   3309 	if (sc->sc_type >= WM_T_82543) {
   3310 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3311 		    NULL, xname, "Alignment Error");
   3312 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3313 		    NULL, xname, "Receive Error");
   3314 		/* XXX Does 82575 have HTDPMC? */
   3315 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3316 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3317 			    EVCNT_TYPE_MISC, NULL, xname,
   3318 			    "Carrier Extension Error");
   3319 		else
   3320 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3321 			    EVCNT_TYPE_MISC, NULL, xname,
   3322 			    "Host Transmit Discarded Packets by MAC");
   3323 
   3324 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3325 		    NULL, xname, "Tx with No CRS");
   3326 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3327 		    NULL, xname, "TCP Segmentation Context Tx");
   3328 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3329 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3330 			    EVCNT_TYPE_MISC, NULL, xname,
   3331 			    "TCP Segmentation Context Tx Fail");
   3332 		else {
   3333 			/* XXX Is the circuit breaker only for 82576? */
   3334 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3335 			    EVCNT_TYPE_MISC, NULL, xname,
   3336 			    "Circuit Breaker Rx Dropped Packet");
   3337 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3338 			    EVCNT_TYPE_MISC, NULL, xname,
   3339 			    "Circuit Breaker Rx Manageability Packet");
   3340 		}
   3341 	}
   3342 
   3343 	if (sc->sc_type >= WM_T_82542_2_1) {
   3344 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3345 		    NULL, xname, "XOFF Transmitted");
   3346 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3347 		    NULL, xname, "XON Transmitted");
   3348 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3349 		    NULL, xname, "XOFF Received");
   3350 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3351 		    NULL, xname, "XON Received");
   3352 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3353 		    NULL, xname, "FC Received Unsupported");
   3354 	}
   3355 
   3356 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3357 	    NULL, xname, "Single Collision");
   3358 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3359 	    NULL, xname, "Excessive Collisions");
   3360 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3361 	    NULL, xname, "Multiple Collision");
   3362 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3363 	    NULL, xname, "Late Collisions");
   3364 
   3365 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3366 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3367 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3368 
   3369 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Defer");
   3371 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3372 	    NULL, xname, "Packets Rx (64 bytes)");
   3373 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3374 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3375 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3376 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3377 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3378 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3379 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Good Packets Rx");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Broadcast Packets Rx");
   3387 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3388 	    NULL, xname, "Multicast Packets Rx");
   3389 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3390 	    NULL, xname, "Good Packets Tx");
   3391 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3392 	    NULL, xname, "Good Octets Rx");
   3393 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3394 	    NULL, xname, "Good Octets Tx");
   3395 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3396 	    NULL, xname, "Rx No Buffers");
   3397 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3398 	    NULL, xname, "Rx Undersize (valid CRC)");
   3399 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3400 	    NULL, xname, "Rx Fragment (bad CRC)");
   3401 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3402 	    NULL, xname, "Rx Oversize (valid CRC)");
   3403 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3404 	    NULL, xname, "Rx Jabber (bad CRC)");
   3405 	if (sc->sc_type >= WM_T_82540) {
   3406 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3407 		    NULL, xname, "Management Packets RX");
   3408 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3409 		    NULL, xname, "Management Packets Dropped");
   3410 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3411 		    NULL, xname, "Management Packets TX");
   3412 	}
   3413 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3414 	    NULL, xname, "Total Octets Rx");
   3415 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3416 	    NULL, xname, "Total Octets Tx");
   3417 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3418 	    NULL, xname, "Total Packets Rx");
   3419 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3420 	    NULL, xname, "Total Packets Tx");
   3421 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3422 	    NULL, xname, "Packets Tx (64 bytes)");
   3423 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3424 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3425 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3426 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3427 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3428 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3429 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3430 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3431 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3432 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3433 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3434 	    NULL, xname, "Multicast Packets Tx");
   3435 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3436 	    NULL, xname, "Broadcast Packets Tx");
   3437 	if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
   3438 		evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3439 		    NULL, xname, "Interrupt Assertion");
   3440 	if (sc->sc_type < WM_T_82575) {
   3441 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3442 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3443 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3444 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3445 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3446 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3447 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3448 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3449 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3450 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3451 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3452 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3453 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3454 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3455 
   3456 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3457 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3458 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3459 	} else if (!WM_IS_ICHPCH(sc)) {
   3460 		/*
   3461 		 * For 82575 and newer.
   3462 		 *
   3463 		 * On 80003, ICHs and PCHs, it seems all of the following
   3464 		 * registers are zero.
   3465 		 */
   3466 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3467 		    NULL, xname, "Rx Packets To Host");
   3468 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3469 		    NULL, xname, "Debug Counter 1");
   3470 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3471 		    NULL, xname, "Debug Counter 2");
   3472 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3473 		    NULL, xname, "Debug Counter 3");
   3474 
    3475 		/*
    3476 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3477 		 * Empty).  I think that's wrong.  The real count I observed
    3478 		 * is the same as GPTC (Good Packets Tx) and TPT (Total
    3479 		 * Packets Tx).  It's HGPTC (Host Good Packets Tx), which is
    3480 		 * described in the 82576 datasheet.
    3481 		 */
   3482 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3483 		    NULL, xname, "Host Good Packets TX");
   3484 
   3485 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3486 		    NULL, xname, "Debug Counter 4");
   3487 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3488 		    NULL, xname, "Rx Desc Min Thresh");
   3489 		/* XXX Is the circuit breaker only for 82576? */
   3490 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3491 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3492 
   3493 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3494 		    NULL, xname, "Host Good Octets Rx");
   3495 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3496 		    NULL, xname, "Host Good Octets Tx");
   3497 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3498 		    NULL, xname, "Length Errors (length/type <= 1500)");
   3499 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3500 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3501 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3502 		    NULL, xname, "Header Redirection Missed Packet");
   3503 	}
   3504 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3505 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3506 		    NULL, xname, "EEE Tx LPI");
   3507 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3508 		    NULL, xname, "EEE Rx LPI");
   3509 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3510 		    NULL, xname, "BMC2OS Packets received by host");
   3511 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3512 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3513 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3514 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3515 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3516 		    NULL, xname, "OS2BMC Packets received by BMC");
   3517 	}
   3518 #endif /* WM_EVENT_COUNTERS */
   3519 
   3520 	sc->sc_txrx_use_workqueue = false;
   3521 
   3522 	if (wm_phy_need_linkdown_discard(sc)) {
   3523 		DPRINTF(sc, WM_DEBUG_LINK,
   3524 		    ("%s: %s: Set linkdown discard flag\n",
   3525 			device_xname(sc->sc_dev), __func__));
   3526 		wm_set_linkdown_discard(sc);
   3527 	}
   3528 
   3529 	wm_init_sysctls(sc);
   3530 
   3531 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3532 		pmf_class_network_register(self, ifp);
   3533 	else
   3534 		aprint_error_dev(self, "couldn't establish power handler\n");
   3535 
   3536 	sc->sc_flags |= WM_F_ATTACHED;
   3537 out:
   3538 	return;
   3539 }
   3540 
   3541 /* The detach function (ca_detach) */
   3542 static int
   3543 wm_detach(device_t self, int flags __unused)
   3544 {
   3545 	struct wm_softc *sc = device_private(self);
   3546 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3547 	int i;
   3548 
   3549 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3550 		return 0;
   3551 
   3552 	/* Stop the interface. Callouts are stopped in it. */
   3553 	IFNET_LOCK(ifp);
   3554 	sc->sc_dying = true;
   3555 	wm_stop(ifp, 1);
   3556 	IFNET_UNLOCK(ifp);
   3557 
   3558 	pmf_device_deregister(self);
   3559 
   3560 	sysctl_teardown(&sc->sc_sysctllog);
   3561 
   3562 #ifdef WM_EVENT_COUNTERS
   3563 	evcnt_detach(&sc->sc_ev_linkintr);
   3564 
   3565 	evcnt_detach(&sc->sc_ev_crcerrs);
   3566 	evcnt_detach(&sc->sc_ev_symerrc);
   3567 	evcnt_detach(&sc->sc_ev_mpc);
   3568 	evcnt_detach(&sc->sc_ev_colc);
   3569 	evcnt_detach(&sc->sc_ev_sec);
   3570 	evcnt_detach(&sc->sc_ev_rlec);
   3571 
   3572 	if (sc->sc_type >= WM_T_82543) {
   3573 		evcnt_detach(&sc->sc_ev_algnerrc);
   3574 		evcnt_detach(&sc->sc_ev_rxerrc);
   3575 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3576 			evcnt_detach(&sc->sc_ev_cexterr);
   3577 		else
   3578 			evcnt_detach(&sc->sc_ev_htdpmc);
   3579 
   3580 		evcnt_detach(&sc->sc_ev_tncrs);
   3581 		evcnt_detach(&sc->sc_ev_tsctc);
   3582 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3583 			evcnt_detach(&sc->sc_ev_tsctfc);
   3584 		else {
   3585 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3586 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3587 		}
   3588 	}
   3589 
   3590 	if (sc->sc_type >= WM_T_82542_2_1) {
   3591 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3592 		evcnt_detach(&sc->sc_ev_tx_xon);
   3593 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3594 		evcnt_detach(&sc->sc_ev_rx_xon);
   3595 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3596 	}
   3597 
   3598 	evcnt_detach(&sc->sc_ev_scc);
   3599 	evcnt_detach(&sc->sc_ev_ecol);
   3600 	evcnt_detach(&sc->sc_ev_mcc);
   3601 	evcnt_detach(&sc->sc_ev_latecol);
   3602 
   3603 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3604 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3605 
   3606 	evcnt_detach(&sc->sc_ev_dc);
   3607 	evcnt_detach(&sc->sc_ev_prc64);
   3608 	evcnt_detach(&sc->sc_ev_prc127);
   3609 	evcnt_detach(&sc->sc_ev_prc255);
   3610 	evcnt_detach(&sc->sc_ev_prc511);
   3611 	evcnt_detach(&sc->sc_ev_prc1023);
   3612 	evcnt_detach(&sc->sc_ev_prc1522);
   3613 	evcnt_detach(&sc->sc_ev_gprc);
   3614 	evcnt_detach(&sc->sc_ev_bprc);
   3615 	evcnt_detach(&sc->sc_ev_mprc);
   3616 	evcnt_detach(&sc->sc_ev_gptc);
   3617 	evcnt_detach(&sc->sc_ev_gorc);
   3618 	evcnt_detach(&sc->sc_ev_gotc);
   3619 	evcnt_detach(&sc->sc_ev_rnbc);
   3620 	evcnt_detach(&sc->sc_ev_ruc);
   3621 	evcnt_detach(&sc->sc_ev_rfc);
   3622 	evcnt_detach(&sc->sc_ev_roc);
   3623 	evcnt_detach(&sc->sc_ev_rjc);
   3624 	if (sc->sc_type >= WM_T_82540) {
   3625 		evcnt_detach(&sc->sc_ev_mgtprc);
   3626 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3627 		evcnt_detach(&sc->sc_ev_mgtptc);
   3628 	}
   3629 	evcnt_detach(&sc->sc_ev_tor);
   3630 	evcnt_detach(&sc->sc_ev_tot);
   3631 	evcnt_detach(&sc->sc_ev_tpr);
   3632 	evcnt_detach(&sc->sc_ev_tpt);
   3633 	evcnt_detach(&sc->sc_ev_ptc64);
   3634 	evcnt_detach(&sc->sc_ev_ptc127);
   3635 	evcnt_detach(&sc->sc_ev_ptc255);
   3636 	evcnt_detach(&sc->sc_ev_ptc511);
   3637 	evcnt_detach(&sc->sc_ev_ptc1023);
   3638 	evcnt_detach(&sc->sc_ev_ptc1522);
   3639 	evcnt_detach(&sc->sc_ev_mptc);
   3640 	evcnt_detach(&sc->sc_ev_bptc);
   3641 	if (sc->sc_type >= WM_T_82571)
   3642 		evcnt_detach(&sc->sc_ev_iac);
   3643 	if (sc->sc_type < WM_T_82575) {
   3644 		evcnt_detach(&sc->sc_ev_icrxptc);
   3645 		evcnt_detach(&sc->sc_ev_icrxatc);
   3646 		evcnt_detach(&sc->sc_ev_ictxptc);
   3647 		evcnt_detach(&sc->sc_ev_ictxatc);
   3648 		evcnt_detach(&sc->sc_ev_ictxqec);
   3649 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3650 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3651 		evcnt_detach(&sc->sc_ev_icrxoc);
   3652 	} else if (!WM_IS_ICHPCH(sc)) {
   3653 		evcnt_detach(&sc->sc_ev_rpthc);
   3654 		evcnt_detach(&sc->sc_ev_debug1);
   3655 		evcnt_detach(&sc->sc_ev_debug2);
   3656 		evcnt_detach(&sc->sc_ev_debug3);
   3657 		evcnt_detach(&sc->sc_ev_hgptc);
   3658 		evcnt_detach(&sc->sc_ev_debug4);
   3659 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3660 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3661 
   3662 		evcnt_detach(&sc->sc_ev_hgorc);
   3663 		evcnt_detach(&sc->sc_ev_hgotc);
   3664 		evcnt_detach(&sc->sc_ev_lenerrs);
   3665 		evcnt_detach(&sc->sc_ev_scvpc);
   3666 		evcnt_detach(&sc->sc_ev_hrmpc);
   3667 	}
   3668 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3669 		evcnt_detach(&sc->sc_ev_tlpic);
   3670 		evcnt_detach(&sc->sc_ev_rlpic);
   3671 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3672 		evcnt_detach(&sc->sc_ev_o2bspc);
   3673 		evcnt_detach(&sc->sc_ev_b2ospc);
   3674 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3675 	}
   3676 #endif /* WM_EVENT_COUNTERS */
   3677 
   3678 	rnd_detach_source(&sc->rnd_source);
   3679 
   3680 	/* Tell the firmware about the release */
   3681 	mutex_enter(sc->sc_core_lock);
   3682 	wm_release_manageability(sc);
   3683 	wm_release_hw_control(sc);
   3684 	wm_enable_wakeup(sc);
   3685 	mutex_exit(sc->sc_core_lock);
   3686 
   3687 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3688 
   3689 	ether_ifdetach(ifp);
   3690 	if_detach(ifp);
   3691 	if_percpuq_destroy(sc->sc_ipq);
   3692 
   3693 	/* Delete all remaining media. */
   3694 	ifmedia_fini(&sc->sc_mii.mii_media);
   3695 
   3696 	/* Unload RX dmamaps and free mbufs */
   3697 	for (i = 0; i < sc->sc_nqueues; i++) {
   3698 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3699 		mutex_enter(rxq->rxq_lock);
   3700 		wm_rxdrain(rxq);
   3701 		mutex_exit(rxq->rxq_lock);
   3702 	}
   3703 	/* Must unlock here */
   3704 
   3705 	/* Disestablish the interrupt handler */
   3706 	for (i = 0; i < sc->sc_nintrs; i++) {
   3707 		if (sc->sc_ihs[i] != NULL) {
   3708 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3709 			sc->sc_ihs[i] = NULL;
   3710 		}
   3711 	}
   3712 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3713 
   3714 	/* wm_stop() ensured that the workqueues are stopped. */
   3715 	workqueue_destroy(sc->sc_queue_wq);
   3716 	workqueue_destroy(sc->sc_reset_wq);
   3717 
   3718 	for (i = 0; i < sc->sc_nqueues; i++)
   3719 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3720 
   3721 	wm_free_txrx_queues(sc);
   3722 
   3723 	/* Unmap the registers */
   3724 	if (sc->sc_ss) {
   3725 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3726 		sc->sc_ss = 0;
   3727 	}
   3728 	if (sc->sc_ios) {
   3729 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3730 		sc->sc_ios = 0;
   3731 	}
   3732 	if (sc->sc_flashs) {
   3733 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3734 		sc->sc_flashs = 0;
   3735 	}
   3736 
   3737 	if (sc->sc_core_lock)
   3738 		mutex_obj_free(sc->sc_core_lock);
   3739 	if (sc->sc_ich_phymtx)
   3740 		mutex_obj_free(sc->sc_ich_phymtx);
   3741 	if (sc->sc_ich_nvmmtx)
   3742 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3743 
   3744 	return 0;
   3745 }
   3746 
   3747 static bool
   3748 wm_suspend(device_t self, const pmf_qual_t *qual)
   3749 {
   3750 	struct wm_softc *sc = device_private(self);
   3751 
   3752 	wm_release_manageability(sc);
   3753 	wm_release_hw_control(sc);
   3754 	wm_enable_wakeup(sc);
   3755 
   3756 	return true;
   3757 }
   3758 
   3759 static bool
   3760 wm_resume(device_t self, const pmf_qual_t *qual)
   3761 {
   3762 	struct wm_softc *sc = device_private(self);
   3763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3764 	pcireg_t reg;
   3765 	char buf[256];
   3766 
   3767 	reg = CSR_READ(sc, WMREG_WUS);
   3768 	if (reg != 0) {
   3769 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3770 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
    3771 		/* The WUS bits are W1C (write 1 to clear). */
         		CSR_WRITE(sc, WMREG_WUS, 0xffffffff);
   3772 	}
   3773 
   3774 	if (sc->sc_type >= WM_T_PCH2)
   3775 		wm_resume_workarounds_pchlan(sc);
   3776 	IFNET_LOCK(ifp);
   3777 	if ((ifp->if_flags & IFF_UP) == 0) {
   3778 		/* >= PCH_SPT hardware workaround before reset. */
   3779 		if (sc->sc_type >= WM_T_PCH_SPT)
   3780 			wm_flush_desc_rings(sc);
   3781 
   3782 		wm_reset(sc);
   3783 		/* Non-AMT based hardware can now take control from firmware */
   3784 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3785 			wm_get_hw_control(sc);
   3786 		wm_init_manageability(sc);
   3787 	} else {
    3788 		/*
    3789 		 * We called pmf_class_network_register(), so if_init() is
    3790 		 * called automatically when IFF_UP is set.  wm_reset(),
    3791 		 * wm_get_hw_control() and wm_init_manageability() are called
    3792 		 * via wm_init().
    3793 		 */
   3794 	}
   3795 	IFNET_UNLOCK(ifp);
   3796 
   3797 	return true;
   3798 }
   3799 
    3800 /*
    3801  * wm_watchdog:
    3802  *
    3803  *	Watchdog checker.  Returns true if the tick callout should be
         *	rescheduled, false when a watchdog reset has been scheduled.
    3804  */
   3805 static bool
   3806 wm_watchdog(struct ifnet *ifp)
   3807 {
   3808 	int qid;
   3809 	struct wm_softc *sc = ifp->if_softc;
    3810 	/* Bitmask of hung Tx queues; wm(4) has at most 16 queues (82576). */
         	uint16_t hang_queue = 0;
   3811 
   3812 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3813 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3814 
   3815 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3816 	}
   3817 
   3818 #ifdef WM_DEBUG
   3819 	if (sc->sc_trigger_reset) {
   3820 		/* debug operation, no need for atomicity or reliability */
   3821 		sc->sc_trigger_reset = 0;
   3822 		hang_queue++;
   3823 	}
   3824 #endif
   3825 
   3826 	if (hang_queue == 0)
   3827 		return true;
   3828 
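         	/*
         	 * atomic_swap_uint() returns the previous value, so only the
         	 * first caller to see 0 enqueues the reset work; an already
         	 * pending reset is not queued twice.
         	 */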
   3829 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3830 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3831 
   3832 	return false;
   3833 }
   3834 
   3835 /*
   3836  * Perform an interface watchdog reset.
   3837  */
   3838 static void
   3839 wm_handle_reset_work(struct work *work, void *arg)
   3840 {
   3841 	struct wm_softc * const sc = arg;
   3842 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3843 
   3844 	/* Don't want ioctl operations to happen */
   3845 	IFNET_LOCK(ifp);
   3846 
    3847 	/* Reset the interface. */
   3848 	wm_init(ifp);
   3849 
   3850 	IFNET_UNLOCK(ifp);
   3851 
    3852 	/*
    3853 	 * There is still some upper-layer processing that calls
    3854 	 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
    3855 	 */
   3856 	/* Try to get more packets going. */
   3857 	ifp->if_start(ifp);
   3858 
   3859 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3860 }
   3861 
   3862 
   3863 static void
   3864 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3865 {
   3866 
   3867 	mutex_enter(txq->txq_lock);
   3868 	if (txq->txq_sending &&
   3869 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3870 		wm_watchdog_txq_locked(ifp, txq, hang);
   3871 
   3872 	mutex_exit(txq->txq_lock);
   3873 }
   3874 
   3875 static void
   3876 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3877     uint16_t *hang)
   3878 {
   3879 	struct wm_softc *sc = ifp->if_softc;
   3880 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3881 
   3882 	KASSERT(mutex_owned(txq->txq_lock));
   3883 
   3884 	/*
   3885 	 * Since we're using delayed interrupts, sweep up
   3886 	 * before we report an error.
   3887 	 */
   3888 	wm_txeof(txq, UINT_MAX);
   3889 
   3890 	if (txq->txq_sending)
   3891 		*hang |= __BIT(wmq->wmq_id);
   3892 
   3893 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3894 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3895 		    device_xname(sc->sc_dev));
   3896 	} else {
   3897 #ifdef WM_DEBUG
   3898 		int i, j;
   3899 		struct wm_txsoft *txs;
   3900 #endif
   3901 		log(LOG_ERR,
   3902 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3903 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3904 		    txq->txq_next);
   3905 		if_statinc(ifp, if_oerrors);
   3906 #ifdef WM_DEBUG
   3907 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3908 		     i = WM_NEXTTXS(txq, i)) {
   3909 			txs = &txq->txq_soft[i];
   3910 			printf("txs %d tx %d -> %d\n",
   3911 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3912 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3913 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3914 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3915 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3916 					printf("\t %#08x%08x\n",
   3917 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3918 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3919 				} else {
   3920 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3921 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3922 					    txq->txq_descs[j].wtx_addr.wa_low);
   3923 					printf("\t %#04x%02x%02x%08x\n",
   3924 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3925 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3926 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3927 					    txq->txq_descs[j].wtx_cmdlen);
   3928 				}
   3929 				if (j == txs->txs_lastdesc)
   3930 					break;
   3931 			}
   3932 		}
   3933 #endif
   3934 	}
   3935 }
   3936 
   3937 /*
   3938  * wm_tick:
   3939  *
   3940  *	One second timer, used to check link status, sweep up
   3941  *	completed transmit jobs, etc.
   3942  */
   3943 static void
   3944 wm_tick(void *arg)
   3945 {
   3946 	struct wm_softc *sc = arg;
   3947 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3948 
   3949 	mutex_enter(sc->sc_core_lock);
   3950 
   3951 	if (sc->sc_core_stopping) {
   3952 		mutex_exit(sc->sc_core_lock);
   3953 		return;
   3954 	}
   3955 
   3956 	wm_update_stats(sc);
   3957 
   3958 	if (sc->sc_flags & WM_F_HAS_MII) {
   3959 		bool dotick = true;
   3960 
   3961 		/*
   3962 		 * Workaround for some chips to delay sending LINK_STATE_UP.
   3963 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
   3964 		 */
   3965 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   3966 			struct timeval now;
   3967 
   3968 			getmicrotime(&now);
   3969 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   3970 				dotick = false;
   3971 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   3972 				/* Simplify by checking tv_sec only. */
   3973 
   3974 				sc->sc_linkup_delay_time.tv_sec = 0;
   3975 				sc->sc_linkup_delay_time.tv_usec = 0;
   3976 			}
   3977 		}
   3978 		if (dotick)
   3979 			mii_tick(&sc->sc_mii);
   3980 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3981 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3982 		wm_serdes_tick(sc);
   3983 	else
   3984 		wm_tbi_tick(sc);
   3985 
   3986 	mutex_exit(sc->sc_core_lock);
   3987 
   3988 	if (wm_watchdog(ifp))
   3989 		callout_schedule(&sc->sc_tick_ch, hz);
   3990 }
   3991 
   3992 static int
   3993 wm_ifflags_cb(struct ethercom *ec)
   3994 {
   3995 	struct ifnet *ifp = &ec->ec_if;
   3996 	struct wm_softc *sc = ifp->if_softc;
   3997 	u_short iffchange;
   3998 	int ecchange;
   3999 	bool needreset = false;
   4000 	int rc = 0;
   4001 
   4002 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4003 		device_xname(sc->sc_dev), __func__));
   4004 
   4005 	KASSERT(IFNET_LOCKED(ifp));
   4006 
   4007 	mutex_enter(sc->sc_core_lock);
   4008 
    4009 	/*
    4010 	 * Check if_flags.  The main purpose is to avoid a full reset (and
    4011 	 * hence link-down) when bpf is opened and only IFF_PROMISC changes.
    4012 	 */
   4013 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   4014 	sc->sc_if_flags = ifp->if_flags;
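         	/* Any change outside IFF_CANTCHANGE and IFF_DEBUG requires
         	 * a full reset. */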
   4015 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   4016 		needreset = true;
   4017 		goto ec;
   4018 	}
   4019 
    4020 	/* if_flags-related updates */
   4021 	if ((iffchange & IFF_PROMISC) != 0)
   4022 		wm_set_filter(sc);
   4023 
   4024 	wm_set_vlan(sc);
   4025 
   4026 ec:
   4027 	/* Check for ec_capenable. */
   4028 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   4029 	sc->sc_ec_capenable = ec->ec_capenable;
   4030 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   4031 		needreset = true;
   4032 		goto out;
   4033 	}
   4034 
   4035 	/* ec related updates */
   4036 	wm_set_eee(sc);
   4037 
   4038 out:
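         	/*
         	 * Returning ENETRESET tells the ifflags callback machinery to
         	 * reinitialize the interface so the new settings take effect.
         	 */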
   4039 	if (needreset)
   4040 		rc = ENETRESET;
   4041 	mutex_exit(sc->sc_core_lock);
   4042 
   4043 	return rc;
   4044 }
   4045 
   4046 static bool
   4047 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4048 {
   4049 
   4050 	switch (sc->sc_phytype) {
   4051 	case WMPHY_82577: /* ihphy */
   4052 	case WMPHY_82578: /* atphy */
   4053 	case WMPHY_82579: /* ihphy */
   4054 	case WMPHY_I217: /* ihphy */
   4055 	case WMPHY_82580: /* ihphy */
   4056 	case WMPHY_I350: /* ihphy */
   4057 		return true;
   4058 	default:
   4059 		return false;
   4060 	}
   4061 }
   4062 
   4063 static void
   4064 wm_set_linkdown_discard(struct wm_softc *sc)
   4065 {
   4066 
   4067 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4068 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4069 
   4070 		mutex_enter(txq->txq_lock);
   4071 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4072 		mutex_exit(txq->txq_lock);
   4073 	}
   4074 }
   4075 
   4076 static void
   4077 wm_clear_linkdown_discard(struct wm_softc *sc)
   4078 {
   4079 
   4080 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4081 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4082 
   4083 		mutex_enter(txq->txq_lock);
   4084 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4085 		mutex_exit(txq->txq_lock);
   4086 	}
   4087 }
   4088 
   4089 /*
   4090  * wm_ioctl:		[ifnet interface function]
   4091  *
   4092  *	Handle control requests from the operator.
   4093  */
   4094 static int
   4095 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4096 {
   4097 	struct wm_softc *sc = ifp->if_softc;
   4098 	struct ifreq *ifr = (struct ifreq *)data;
   4099 	struct ifaddr *ifa = (struct ifaddr *)data;
   4100 	struct sockaddr_dl *sdl;
   4101 	int error;
   4102 
   4103 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4104 		device_xname(sc->sc_dev), __func__));
   4105 
   4106 	switch (cmd) {
   4107 	case SIOCADDMULTI:
   4108 	case SIOCDELMULTI:
   4109 		break;
   4110 	default:
   4111 		KASSERT(IFNET_LOCKED(ifp));
   4112 	}
   4113 
   4114 	if (cmd == SIOCZIFDATA) {
   4115 		/*
   4116 		 * Special handling for SIOCZIFDATA.
   4117 		 * Copying and clearing the if_data structure is done with
   4118 		 * ether_ioctl() below.
   4119 		 */
   4120 		mutex_enter(sc->sc_core_lock);
   4121 		wm_update_stats(sc);
   4122 		wm_clear_evcnt(sc);
   4123 		mutex_exit(sc->sc_core_lock);
   4124 	}
   4125 
   4126 	switch (cmd) {
   4127 	case SIOCSIFMEDIA:
   4128 		mutex_enter(sc->sc_core_lock);
   4129 		/* Flow control requires full-duplex mode. */
   4130 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4131 		    (ifr->ifr_media & IFM_FDX) == 0)
   4132 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4133 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4134 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4135 				/* We can do both TXPAUSE and RXPAUSE. */
   4136 				ifr->ifr_media |=
   4137 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4138 			}
   4139 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4140 		}
   4141 		mutex_exit(sc->sc_core_lock);
   4142 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4143 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4144 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4145 				DPRINTF(sc, WM_DEBUG_LINK,
   4146 				    ("%s: %s: Set linkdown discard flag\n",
   4147 					device_xname(sc->sc_dev), __func__));
   4148 				wm_set_linkdown_discard(sc);
   4149 			}
   4150 		}
   4151 		break;
   4152 	case SIOCINITIFADDR:
   4153 		mutex_enter(sc->sc_core_lock);
   4154 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4155 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4156 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4157 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4158 			/* Unicast address is the first multicast entry */
   4159 			wm_set_filter(sc);
   4160 			error = 0;
   4161 			mutex_exit(sc->sc_core_lock);
   4162 			break;
   4163 		}
   4164 		mutex_exit(sc->sc_core_lock);
   4165 		/*FALLTHROUGH*/
   4166 	default:
   4167 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4168 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4169 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4170 				DPRINTF(sc, WM_DEBUG_LINK,
   4171 				    ("%s: %s: Set linkdown discard flag\n",
   4172 					device_xname(sc->sc_dev), __func__));
   4173 				wm_set_linkdown_discard(sc);
   4174 			}
   4175 		}
   4176 		const int s = splnet();
   4177 		/* It may call wm_start, so unlock here */
   4178 		error = ether_ioctl(ifp, cmd, data);
   4179 		splx(s);
   4180 		if (error != ENETRESET)
   4181 			break;
   4182 
   4183 		error = 0;
   4184 
   4185 		if (cmd == SIOCSIFCAP)
   4186 			error = if_init(ifp);
   4187 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4188 			mutex_enter(sc->sc_core_lock);
   4189 			if (sc->sc_if_flags & IFF_RUNNING) {
   4190 				/*
   4191 				 * Multicast list has changed; set the
   4192 				 * hardware filter accordingly.
   4193 				 */
   4194 				wm_set_filter(sc);
   4195 			}
   4196 			mutex_exit(sc->sc_core_lock);
   4197 		}
   4198 		break;
   4199 	}
   4200 
   4201 	return error;
   4202 }
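
         /*
          * Illustrative usage sketch: the SIOCSIFMEDIA case above runs when
          * the operator selects media from userland, e.g. (assuming NetBSD's
          * ifconfig maps its "flowcontrol" media option to IFM_FLOW):
          *
          *	ifconfig wm0 media 1000baseT mediaopt full-duplex,flowcontrol
          *
          * Since the subtype is not IFM_AUTO and IFM_FLOW is set, the handler
          * expands the request to IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE and
          * caches it in sc->sc_flowflags before handing off to
          * ifmedia_ioctl().
          */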
   4203 
   4204 /* MAC address related */
   4205 
   4206 /*
    4207  * Get the offset of the MAC address and return it.
    4208  * On error, return offset 0.
   4209  */
   4210 static uint16_t
   4211 wm_check_alt_mac_addr(struct wm_softc *sc)
   4212 {
   4213 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4214 	uint16_t offset = NVM_OFF_MACADDR;
   4215 
   4216 	/* Try to read alternative MAC address pointer */
   4217 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4218 		return 0;
   4219 
    4220 	/* Check whether the pointer is valid. */
   4221 	if ((offset == 0x0000) || (offset == 0xffff))
   4222 		return 0;
   4223 
   4224 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    4225 	/*
    4226 	 * Check whether the alternative MAC address is valid.  Some
    4227 	 * cards have a non-0xffff pointer but don't actually use an
    4228 	 * alternative MAC address.
    4229 	 *
    4230 	 * Check that the multicast bit (bit 0 of octet 0) is clear.
    4231 	 */
   4232 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4233 		if (((myea[0] & 0xff) & 0x01) == 0)
   4234 			return offset; /* Found */
   4235 
   4236 	/* Not found */
   4237 	return 0;
   4238 }
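
         /*
          * Illustrative note on the NVM layout assumed above: the word at
          * NVM_OFF_ALT_MAC_ADDR_PTR is itself a word offset pointing at a
          * per-port MAC address area.  Each address takes ETHER_ADDR_LEN / 2
          * == 3 NVM words, so (assuming NVM_OFF_MACADDR_82571(f) expands to
          * f * 3) function 1 reads its candidate address 3 words past the
          * start of that area, then rejects it if the multicast bit (bit 0
          * of the first octet) is set.
          */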
   4239 
   4240 static int
   4241 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4242 {
   4243 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4244 	uint16_t offset = NVM_OFF_MACADDR;
   4245 	int do_invert = 0;
   4246 
   4247 	switch (sc->sc_type) {
   4248 	case WM_T_82580:
   4249 	case WM_T_I350:
   4250 	case WM_T_I354:
   4251 		/* EEPROM Top Level Partitioning */
   4252 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4253 		break;
   4254 	case WM_T_82571:
   4255 	case WM_T_82575:
   4256 	case WM_T_82576:
   4257 	case WM_T_80003:
   4258 	case WM_T_I210:
   4259 	case WM_T_I211:
   4260 		offset = wm_check_alt_mac_addr(sc);
   4261 		if (offset == 0)
   4262 			if ((sc->sc_funcid & 0x01) == 1)
   4263 				do_invert = 1;
   4264 		break;
   4265 	default:
   4266 		if ((sc->sc_funcid & 0x01) == 1)
   4267 			do_invert = 1;
   4268 		break;
   4269 	}
   4270 
   4271 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4272 		goto bad;
   4273 
   4274 	enaddr[0] = myea[0] & 0xff;
   4275 	enaddr[1] = myea[0] >> 8;
   4276 	enaddr[2] = myea[1] & 0xff;
   4277 	enaddr[3] = myea[1] >> 8;
   4278 	enaddr[4] = myea[2] & 0xff;
   4279 	enaddr[5] = myea[2] >> 8;
   4280 
   4281 	/*
   4282 	 * Toggle the LSB of the MAC address on the second port
   4283 	 * of some dual port cards.
   4284 	 */
   4285 	if (do_invert != 0)
   4286 		enaddr[5] ^= 1;
   4287 
   4288 	return 0;
   4289 
   4290 bad:
   4291 	return -1;
   4292 }
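
         /*
          * Worked example (illustrative): the NVM stores the MAC address as
          * three little-endian words, so myea[] = { 0x2211, 0x4433, 0x6655 }
          * unpacks to enaddr 11:22:33:44:55:66.  On the second port of the
          * affected dual port cards, do_invert then flips the LSB, yielding
          * 11:22:33:44:55:67.
          */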
   4293 
   4294 /*
   4295  * wm_set_ral:
   4296  *
    4297  *	Set an entry in the receive address list.
   4298  */
   4299 static void
   4300 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4301 {
   4302 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4303 	uint32_t wlock_mac;
   4304 	int rv;
   4305 
   4306 	if (enaddr != NULL) {
   4307 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4308 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4309 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4310 		ral_hi |= RAL_AV;
   4311 	} else {
   4312 		ral_lo = 0;
   4313 		ral_hi = 0;
   4314 	}
   4315 
   4316 	switch (sc->sc_type) {
   4317 	case WM_T_82542_2_0:
   4318 	case WM_T_82542_2_1:
   4319 	case WM_T_82543:
   4320 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4321 		CSR_WRITE_FLUSH(sc);
   4322 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4323 		CSR_WRITE_FLUSH(sc);
   4324 		break;
   4325 	case WM_T_PCH2:
   4326 	case WM_T_PCH_LPT:
   4327 	case WM_T_PCH_SPT:
   4328 	case WM_T_PCH_CNP:
   4329 	case WM_T_PCH_TGP:
   4330 		if (idx == 0) {
   4331 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4332 			CSR_WRITE_FLUSH(sc);
   4333 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4334 			CSR_WRITE_FLUSH(sc);
   4335 			return;
   4336 		}
   4337 		if (sc->sc_type != WM_T_PCH2) {
   4338 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4339 			    FWSM_WLOCK_MAC);
   4340 			addrl = WMREG_SHRAL(idx - 1);
   4341 			addrh = WMREG_SHRAH(idx - 1);
   4342 		} else {
   4343 			wlock_mac = 0;
   4344 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4345 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4346 		}
   4347 
   4348 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4349 			rv = wm_get_swflag_ich8lan(sc);
   4350 			if (rv != 0)
   4351 				return;
   4352 			CSR_WRITE(sc, addrl, ral_lo);
   4353 			CSR_WRITE_FLUSH(sc);
   4354 			CSR_WRITE(sc, addrh, ral_hi);
   4355 			CSR_WRITE_FLUSH(sc);
   4356 			wm_put_swflag_ich8lan(sc);
   4357 		}
   4358 
   4359 		break;
   4360 	default:
   4361 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4362 		CSR_WRITE_FLUSH(sc);
   4363 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4364 		CSR_WRITE_FLUSH(sc);
   4365 		break;
   4366 	}
   4367 }
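
         /*
          * Illustrative example: for enaddr 00:11:22:33:44:55 the code above
          * builds ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV, i.e.
          * the first four octets go into the low register and the last two
          * into the high register, with the address-valid bit set.
          */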
   4368 
   4369 /*
   4370  * wm_mchash:
   4371  *
   4372  *	Compute the hash of the multicast address for the 4096-bit
   4373  *	multicast filter.
   4374  */
   4375 static uint32_t
   4376 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4377 {
   4378 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4379 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4380 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4381 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4382 	uint32_t hash;
   4383 
   4384 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4385 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4386 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4387 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   4388 	    || (sc->sc_type == WM_T_PCH_TGP)) {
   4389 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4390 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4391 		return (hash & 0x3ff);
   4392 	}
   4393 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4394 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4395 
   4396 	return (hash & 0xfff);
   4397 }
   4398 
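         /*
          * Worked example (illustrative): the IPv4 all-hosts group 224.0.0.1
          * maps to the multicast MAC 01:00:5e:00:00:01, so enaddr[4] = 0x00
          * and enaddr[5] = 0x01.  Assuming sc_mchash_type == 0 on a non-ICH
          * chip, hash = (0x00 >> 4) | (0x01 << 4) = 0x010; wm_set_filter()
          * below then uses hash >> 5 = 0 as the MTA word index and
          * hash & 0x1f = 16 as the bit number, i.e. bit 16 of MTA[0].
          */
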
    4399 /*
    4400  * wm_rar_count:
    4401  *	Return the number of entries in the receive address list.
    4402  */
   4403 static int
   4404 wm_rar_count(struct wm_softc *sc)
   4405 {
   4406 	int size;
   4407 
   4408 	switch (sc->sc_type) {
   4409 	case WM_T_ICH8:
    4410 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4411 		break;
   4412 	case WM_T_ICH9:
   4413 	case WM_T_ICH10:
   4414 	case WM_T_PCH:
   4415 		size = WM_RAL_TABSIZE_ICH8;
   4416 		break;
   4417 	case WM_T_PCH2:
   4418 		size = WM_RAL_TABSIZE_PCH2;
   4419 		break;
   4420 	case WM_T_PCH_LPT:
   4421 	case WM_T_PCH_SPT:
   4422 	case WM_T_PCH_CNP:
   4423 	case WM_T_PCH_TGP:
   4424 		size = WM_RAL_TABSIZE_PCH_LPT;
   4425 		break;
   4426 	case WM_T_82575:
   4427 	case WM_T_I210:
   4428 	case WM_T_I211:
   4429 		size = WM_RAL_TABSIZE_82575;
   4430 		break;
   4431 	case WM_T_82576:
   4432 	case WM_T_82580:
   4433 		size = WM_RAL_TABSIZE_82576;
   4434 		break;
   4435 	case WM_T_I350:
   4436 	case WM_T_I354:
   4437 		size = WM_RAL_TABSIZE_I350;
   4438 		break;
   4439 	default:
   4440 		size = WM_RAL_TABSIZE;
   4441 	}
   4442 
   4443 	return size;
   4444 }
   4445 
   4446 /*
   4447  * wm_set_filter:
   4448  *
   4449  *	Set up the receive filter.
   4450  */
   4451 static void
   4452 wm_set_filter(struct wm_softc *sc)
   4453 {
   4454 	struct ethercom *ec = &sc->sc_ethercom;
   4455 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4456 	struct ether_multi *enm;
   4457 	struct ether_multistep step;
   4458 	bus_addr_t mta_reg;
   4459 	uint32_t hash, reg, bit;
   4460 	int i, size, ralmax, rv;
   4461 
   4462 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4463 		device_xname(sc->sc_dev), __func__));
   4464 	KASSERT(mutex_owned(sc->sc_core_lock));
   4465 
   4466 	if (sc->sc_type >= WM_T_82544)
   4467 		mta_reg = WMREG_CORDOVA_MTA;
   4468 	else
   4469 		mta_reg = WMREG_MTA;
   4470 
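         	/*
         	 * RCTL_BAM accepts broadcast frames, RCTL_UPE enables unicast
         	 * promiscuous mode and RCTL_MPE enables multicast promiscuous
         	 * mode; clear all three here and set them again below as
         	 * needed.
         	 */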
   4471 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4472 
   4473 	if (sc->sc_if_flags & IFF_BROADCAST)
   4474 		sc->sc_rctl |= RCTL_BAM;
   4475 	if (sc->sc_if_flags & IFF_PROMISC) {
   4476 		sc->sc_rctl |= RCTL_UPE;
   4477 		ETHER_LOCK(ec);
   4478 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4479 		ETHER_UNLOCK(ec);
   4480 		goto allmulti;
   4481 	}
   4482 
   4483 	/*
   4484 	 * Set the station address in the first RAL slot, and
   4485 	 * clear the remaining slots.
   4486 	 */
   4487 	size = wm_rar_count(sc);
   4488 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4489 
   4490 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
   4491 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
   4492 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4493 		switch (i) {
   4494 		case 0:
   4495 			/* We can use all entries */
   4496 			ralmax = size;
   4497 			break;
   4498 		case 1:
   4499 			/* Only RAR[0] */
   4500 			ralmax = 1;
   4501 			break;
   4502 		default:
   4503 			/* Available SHRA + RAR[0] */
   4504 			ralmax = i + 1;
   4505 		}
   4506 	} else
   4507 		ralmax = size;
   4508 	for (i = 1; i < size; i++) {
   4509 		if (i < ralmax)
   4510 			wm_set_ral(sc, NULL, i);
   4511 	}
   4512 
   4513 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4514 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4515 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4516 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   4517 	    || (sc->sc_type == WM_T_PCH_TGP))
   4518 		size = WM_ICH8_MC_TABSIZE;
   4519 	else
   4520 		size = WM_MC_TABSIZE;
   4521 	/* Clear out the multicast table. */
   4522 	for (i = 0; i < size; i++) {
   4523 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4524 		CSR_WRITE_FLUSH(sc);
   4525 	}
   4526 
   4527 	ETHER_LOCK(ec);
   4528 	ETHER_FIRST_MULTI(step, ec, enm);
   4529 	while (enm != NULL) {
   4530 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4531 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4532 			ETHER_UNLOCK(ec);
   4533 			/*
   4534 			 * We must listen to a range of multicast addresses.
   4535 			 * For now, just accept all multicasts, rather than
   4536 			 * trying to set only those filter bits needed to match
   4537 			 * the range.  (At this time, the only use of address
   4538 			 * ranges is for IP multicast routing, for which the
   4539 			 * range is big enough to require all bits set.)
   4540 			 */
   4541 			goto allmulti;
   4542 		}
   4543 
   4544 		hash = wm_mchash(sc, enm->enm_addrlo);
   4545 
   4546 		reg = (hash >> 5);
   4547 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4548 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4549 		    || (sc->sc_type == WM_T_PCH2)
   4550 		    || (sc->sc_type == WM_T_PCH_LPT)
   4551 		    || (sc->sc_type == WM_T_PCH_SPT)
   4552 		    || (sc->sc_type == WM_T_PCH_CNP)
   4553 		    || (sc->sc_type == WM_T_PCH_TGP))
   4554 			reg &= 0x1f;
   4555 		else
   4556 			reg &= 0x7f;
   4557 		bit = hash & 0x1f;
   4558 
   4559 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4560 		hash |= 1U << bit;
   4561 
   4562 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4563 			/*
    4564 			 * 82544 Errata 9: Certain registers cannot be written
   4565 			 * with particular alignments in PCI-X bus operation
   4566 			 * (FCAH, MTA and VFTA).
   4567 			 */
   4568 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4569 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4570 			CSR_WRITE_FLUSH(sc);
   4571 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4572 			CSR_WRITE_FLUSH(sc);
   4573 		} else {
   4574 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4575 			CSR_WRITE_FLUSH(sc);
   4576 		}
   4577 
   4578 		ETHER_NEXT_MULTI(step, enm);
   4579 	}
   4580 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4581 	ETHER_UNLOCK(ec);
   4582 
   4583 	goto setit;
   4584 
   4585 allmulti:
   4586 	sc->sc_rctl |= RCTL_MPE;
   4587 
   4588 setit:
   4589 	if (sc->sc_type >= WM_T_PCH2) {
   4590 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4591 		    && (ifp->if_mtu > ETHERMTU))
   4592 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4593 		else
   4594 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4595 		if (rv != 0)
   4596 			device_printf(sc->sc_dev,
   4597 			    "Failed to do workaround for jumbo frame.\n");
   4598 	}
   4599 
   4600 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4601 }
   4602 
   4603 /* Reset and init related */
   4604 
   4605 static void
   4606 wm_set_vlan(struct wm_softc *sc)
   4607 {
   4608 
   4609 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4610 		device_xname(sc->sc_dev), __func__));
   4611 
   4612 	/* Deal with VLAN enables. */
   4613 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4614 		sc->sc_ctrl |= CTRL_VME;
   4615 	else
   4616 		sc->sc_ctrl &= ~CTRL_VME;
   4617 
   4618 	/* Write the control registers. */
   4619 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4620 }
   4621 
   4622 static void
   4623 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4624 {
   4625 	uint32_t gcr;
   4626 	pcireg_t ctrl2;
   4627 
   4628 	gcr = CSR_READ(sc, WMREG_GCR);
   4629 
   4630 	/* Only take action if timeout value is defaulted to 0 */
   4631 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4632 		goto out;
   4633 
   4634 	if ((gcr & GCR_CAP_VER2) == 0) {
   4635 		gcr |= GCR_CMPL_TMOUT_10MS;
   4636 		goto out;
   4637 	}
   4638 
   4639 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4640 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4641 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4642 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4643 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4644 
   4645 out:
   4646 	/* Disable completion timeout resend */
   4647 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4648 
   4649 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4650 }
   4651 
   4652 void
   4653 wm_get_auto_rd_done(struct wm_softc *sc)
   4654 {
   4655 	int i;
   4656 
    4657 	/* Wait for eeprom to reload */
   4658 	switch (sc->sc_type) {
   4659 	case WM_T_82571:
   4660 	case WM_T_82572:
   4661 	case WM_T_82573:
   4662 	case WM_T_82574:
   4663 	case WM_T_82583:
   4664 	case WM_T_82575:
   4665 	case WM_T_82576:
   4666 	case WM_T_82580:
   4667 	case WM_T_I350:
   4668 	case WM_T_I354:
   4669 	case WM_T_I210:
   4670 	case WM_T_I211:
   4671 	case WM_T_80003:
   4672 	case WM_T_ICH8:
   4673 	case WM_T_ICH9:
   4674 		for (i = 0; i < 10; i++) {
   4675 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4676 				break;
   4677 			delay(1000);
   4678 		}
   4679 		if (i == 10) {
   4680 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4681 			    "complete\n", device_xname(sc->sc_dev));
   4682 		}
   4683 		break;
   4684 	default:
   4685 		break;
   4686 	}
   4687 }
   4688 
   4689 void
   4690 wm_lan_init_done(struct wm_softc *sc)
   4691 {
   4692 	uint32_t reg = 0;
   4693 	int i;
   4694 
   4695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4696 		device_xname(sc->sc_dev), __func__));
   4697 
   4698 	/* Wait for eeprom to reload */
   4699 	switch (sc->sc_type) {
   4700 	case WM_T_ICH10:
   4701 	case WM_T_PCH:
   4702 	case WM_T_PCH2:
   4703 	case WM_T_PCH_LPT:
   4704 	case WM_T_PCH_SPT:
   4705 	case WM_T_PCH_CNP:
   4706 	case WM_T_PCH_TGP:
   4707 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4708 			reg = CSR_READ(sc, WMREG_STATUS);
   4709 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4710 				break;
   4711 			delay(100);
   4712 		}
   4713 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4714 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4715 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4716 		}
   4717 		break;
   4718 	default:
   4719 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4720 		    __func__);
   4721 		break;
   4722 	}
   4723 
   4724 	reg &= ~STATUS_LAN_INIT_DONE;
   4725 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4726 }
   4727 
   4728 void
   4729 wm_get_cfg_done(struct wm_softc *sc)
   4730 {
   4731 	int mask;
   4732 	uint32_t reg;
   4733 	int i;
   4734 
   4735 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4736 		device_xname(sc->sc_dev), __func__));
   4737 
   4738 	/* Wait for eeprom to reload */
   4739 	switch (sc->sc_type) {
   4740 	case WM_T_82542_2_0:
   4741 	case WM_T_82542_2_1:
   4742 		/* null */
   4743 		break;
   4744 	case WM_T_82543:
   4745 	case WM_T_82544:
   4746 	case WM_T_82540:
   4747 	case WM_T_82545:
   4748 	case WM_T_82545_3:
   4749 	case WM_T_82546:
   4750 	case WM_T_82546_3:
   4751 	case WM_T_82541:
   4752 	case WM_T_82541_2:
   4753 	case WM_T_82547:
   4754 	case WM_T_82547_2:
   4755 	case WM_T_82573:
   4756 	case WM_T_82574:
   4757 	case WM_T_82583:
   4758 		/* generic */
   4759 		delay(10*1000);
   4760 		break;
   4761 	case WM_T_80003:
   4762 	case WM_T_82571:
   4763 	case WM_T_82572:
   4764 	case WM_T_82575:
   4765 	case WM_T_82576:
   4766 	case WM_T_82580:
   4767 	case WM_T_I350:
   4768 	case WM_T_I354:
   4769 	case WM_T_I210:
   4770 	case WM_T_I211:
   4771 		if (sc->sc_type == WM_T_82571) {
   4772 			/* Only 82571 shares port 0 */
   4773 			mask = EEMNGCTL_CFGDONE_0;
   4774 		} else
   4775 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4776 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4777 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4778 				break;
   4779 			delay(1000);
   4780 		}
   4781 		if (i >= WM_PHY_CFG_TIMEOUT)
   4782 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4783 				device_xname(sc->sc_dev), __func__));
   4784 		break;
   4785 	case WM_T_ICH8:
   4786 	case WM_T_ICH9:
   4787 	case WM_T_ICH10:
   4788 	case WM_T_PCH:
   4789 	case WM_T_PCH2:
   4790 	case WM_T_PCH_LPT:
   4791 	case WM_T_PCH_SPT:
   4792 	case WM_T_PCH_CNP:
   4793 	case WM_T_PCH_TGP:
   4794 		delay(10*1000);
   4795 		if (sc->sc_type >= WM_T_ICH10)
   4796 			wm_lan_init_done(sc);
   4797 		else
   4798 			wm_get_auto_rd_done(sc);
   4799 
   4800 		/* Clear PHY Reset Asserted bit */
   4801 		reg = CSR_READ(sc, WMREG_STATUS);
   4802 		if ((reg & STATUS_PHYRA) != 0)
   4803 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4804 		break;
   4805 	default:
   4806 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4807 		    __func__);
   4808 		break;
   4809 	}
   4810 }
   4811 
   4812 int
   4813 wm_phy_post_reset(struct wm_softc *sc)
   4814 {
   4815 	device_t dev = sc->sc_dev;
   4816 	uint16_t reg;
   4817 	int rv = 0;
   4818 
   4819 	/* This function is only for ICH8 and newer. */
   4820 	if (sc->sc_type < WM_T_ICH8)
   4821 		return 0;
   4822 
   4823 	if (wm_phy_resetisblocked(sc)) {
   4824 		/* XXX */
   4825 		device_printf(dev, "PHY is blocked\n");
   4826 		return -1;
   4827 	}
   4828 
   4829 	/* Allow time for h/w to get to quiescent state after reset */
   4830 	delay(10*1000);
   4831 
   4832 	/* Perform any necessary post-reset workarounds */
   4833 	if (sc->sc_type == WM_T_PCH)
   4834 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4835 	else if (sc->sc_type == WM_T_PCH2)
   4836 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4837 	if (rv != 0)
   4838 		return rv;
   4839 
   4840 	/* Clear the host wakeup bit after lcd reset */
   4841 	if (sc->sc_type >= WM_T_PCH) {
   4842 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4843 		reg &= ~BM_WUC_HOST_WU_BIT;
   4844 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4845 	}
   4846 
   4847 	/* Configure the LCD with the extended configuration region in NVM */
   4848 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4849 		return rv;
   4850 
   4851 	/* Configure the LCD with the OEM bits in NVM */
   4852 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4853 
   4854 	if (sc->sc_type == WM_T_PCH2) {
   4855 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4856 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4857 			delay(10 * 1000);
   4858 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4859 		}
   4860 		/* Set EEE LPI Update Timer to 200usec */
   4861 		rv = sc->phy.acquire(sc);
   4862 		if (rv)
   4863 			return rv;
   4864 		rv = wm_write_emi_reg_locked(dev,
   4865 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4866 		sc->phy.release(sc);
   4867 	}
   4868 
   4869 	return rv;
   4870 }
   4871 
   4872 /* Only for PCH and newer */
   4873 static int
   4874 wm_write_smbus_addr(struct wm_softc *sc)
   4875 {
   4876 	uint32_t strap, freq;
   4877 	uint16_t phy_data;
   4878 	int rv;
   4879 
   4880 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4881 		device_xname(sc->sc_dev), __func__));
   4882 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4883 
   4884 	strap = CSR_READ(sc, WMREG_STRAP);
   4885 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4886 
   4887 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4888 	if (rv != 0)
   4889 		return rv;
   4890 
   4891 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4892 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4893 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4894 
   4895 	if (sc->sc_phytype == WMPHY_I217) {
   4896 		/* Restore SMBus frequency */
    4897 		if (freq--) {
   4898 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4899 			    | HV_SMB_ADDR_FREQ_HIGH);
   4900 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4901 			    HV_SMB_ADDR_FREQ_LOW);
   4902 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4903 			    HV_SMB_ADDR_FREQ_HIGH);
   4904 		} else
   4905 			DPRINTF(sc, WM_DEBUG_INIT,
   4906 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4907 				device_xname(sc->sc_dev), __func__));
   4908 	}
   4909 
   4910 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4911 	    phy_data);
   4912 }
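
         /*
          * Illustrative note: STRAP_FREQ encodes the SMBus frequency, with 0
          * treated as unsupported.  On an I217 PHY a strap value of, say, 3
          * passes the "if (freq--)" test and becomes 2, so
          * HV_SMB_ADDR_FREQ_LOW stays clear (bit 0 of 2) and
          * HV_SMB_ADDR_FREQ_HIGH is set (bit 1 of 2).
          */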
   4913 
   4914 static int
   4915 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4916 {
   4917 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4918 	uint16_t phy_page = 0;
   4919 	int rv = 0;
   4920 
   4921 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4922 		device_xname(sc->sc_dev), __func__));
   4923 
   4924 	switch (sc->sc_type) {
   4925 	case WM_T_ICH8:
   4926 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4927 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4928 			return 0;
   4929 
   4930 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4931 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4932 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4933 			break;
   4934 		}
   4935 		/* FALLTHROUGH */
   4936 	case WM_T_PCH:
   4937 	case WM_T_PCH2:
   4938 	case WM_T_PCH_LPT:
   4939 	case WM_T_PCH_SPT:
   4940 	case WM_T_PCH_CNP:
   4941 	case WM_T_PCH_TGP:
   4942 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4943 		break;
   4944 	default:
   4945 		return 0;
   4946 	}
   4947 
   4948 	if ((rv = sc->phy.acquire(sc)) != 0)
   4949 		return rv;
   4950 
   4951 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4952 	if ((reg & sw_cfg_mask) == 0)
   4953 		goto release;
   4954 
   4955 	/*
   4956 	 * Make sure HW does not configure LCD from PHY extended configuration
   4957 	 * before SW configuration
   4958 	 */
   4959 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4960 	if ((sc->sc_type < WM_T_PCH2)
   4961 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4962 		goto release;
   4963 
   4964 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4965 		device_xname(sc->sc_dev), __func__));
   4966 	/* word_addr is in DWORD */
   4967 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4968 
   4969 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4970 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4971 	if (cnf_size == 0)
   4972 		goto release;
   4973 
   4974 	if (((sc->sc_type == WM_T_PCH)
   4975 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4976 	    || (sc->sc_type > WM_T_PCH)) {
   4977 		/*
   4978 		 * HW configures the SMBus address and LEDs when the OEM and
   4979 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4980 		 * are cleared, SW will configure them instead.
   4981 		 */
   4982 		DPRINTF(sc, WM_DEBUG_INIT,
   4983 		    ("%s: %s: Configure SMBus and LED\n",
   4984 			device_xname(sc->sc_dev), __func__));
   4985 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4986 			goto release;
   4987 
   4988 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4989 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4990 		    (uint16_t)reg);
   4991 		if (rv != 0)
   4992 			goto release;
   4993 	}
   4994 
   4995 	/* Configure LCD from extended configuration region. */
   4996 	for (i = 0; i < cnf_size; i++) {
   4997 		uint16_t reg_data, reg_addr;
   4998 
   4999 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   5000 			goto release;
   5001 
    5002 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   5003 			goto release;
   5004 
   5005 		if (reg_addr == IGPHY_PAGE_SELECT)
   5006 			phy_page = reg_data;
   5007 
   5008 		reg_addr &= IGPHY_MAXREGADDR;
   5009 		reg_addr |= phy_page;
   5010 
   5011 		KASSERT(sc->phy.writereg_locked != NULL);
   5012 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   5013 		    reg_data);
   5014 	}
   5015 
   5016 release:
   5017 	sc->phy.release(sc);
   5018 	return rv;
   5019 }
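
         /*
          * Illustrative note on the loop above: the extended configuration
          * region is an array of (data, address) NVM word pairs, with data
          * at word_addr + 2 * i and the register address at
          * word_addr + 2 * i + 1.  An entry addressed to IGPHY_PAGE_SELECT
          * updates the cached PHY page; each entry's address is then masked
          * with IGPHY_MAXREGADDR, combined with that page and written to
          * PHY 1 with the PHY semaphore held.
          */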
   5020 
   5021 /*
   5022  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   5023  *  @sc:       pointer to the HW structure
   5024  *  @d0_state: boolean if entering d0 or d3 device state
   5025  *
   5026  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   5027  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   5028  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   5029  */
   5030 int
   5031 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   5032 {
   5033 	uint32_t mac_reg;
   5034 	uint16_t oem_reg;
   5035 	int rv;
   5036 
   5037 	if (sc->sc_type < WM_T_PCH)
   5038 		return 0;
   5039 
   5040 	rv = sc->phy.acquire(sc);
   5041 	if (rv != 0)
   5042 		return rv;
   5043 
   5044 	if (sc->sc_type == WM_T_PCH) {
   5045 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5046 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5047 			goto release;
   5048 	}
   5049 
   5050 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5051 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5052 		goto release;
   5053 
   5054 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5055 
   5056 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5057 	if (rv != 0)
   5058 		goto release;
   5059 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5060 
   5061 	if (d0_state) {
   5062 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5063 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5064 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5065 			oem_reg |= HV_OEM_BITS_LPLU;
   5066 	} else {
   5067 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5068 		    != 0)
   5069 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5070 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5071 		    != 0)
   5072 			oem_reg |= HV_OEM_BITS_LPLU;
   5073 	}
   5074 
   5075 	/* Set Restart auto-neg to activate the bits */
   5076 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5077 	    && (wm_phy_resetisblocked(sc) == false))
   5078 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5079 
   5080 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5081 
   5082 release:
   5083 	sc->phy.release(sc);
   5084 
   5085 	return rv;
   5086 }
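
         /*
          * Illustrative example: entering D0 (d0_state == true) with
          * PHY_CTRL_D0A_LPLU set in PHY_CTRL sets HV_OEM_BITS_LPLU in the
          * PHY's OEM bits register and, unless a PHY reset is blocked,
          * HV_OEM_BITS_ANEGNOW as well, so that autonegotiation restarts
          * with the new settings.
          */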
   5087 
   5088 /* Init hardware bits */
   5089 void
   5090 wm_initialize_hardware_bits(struct wm_softc *sc)
   5091 {
   5092 	uint32_t tarc0, tarc1, reg;
   5093 
   5094 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5095 		device_xname(sc->sc_dev), __func__));
   5096 
   5097 	/* For 82571 variant, 80003 and ICHs */
   5098 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5099 	    || WM_IS_ICHPCH(sc)) {
   5100 
   5101 		/* Transmit Descriptor Control 0 */
   5102 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5103 		reg |= TXDCTL_COUNT_DESC;
   5104 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5105 
   5106 		/* Transmit Descriptor Control 1 */
   5107 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5108 		reg |= TXDCTL_COUNT_DESC;
   5109 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5110 
   5111 		/* TARC0 */
   5112 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5113 		switch (sc->sc_type) {
   5114 		case WM_T_82571:
   5115 		case WM_T_82572:
   5116 		case WM_T_82573:
   5117 		case WM_T_82574:
   5118 		case WM_T_82583:
   5119 		case WM_T_80003:
   5120 			/* Clear bits 30..27 */
   5121 			tarc0 &= ~__BITS(30, 27);
   5122 			break;
   5123 		default:
   5124 			break;
   5125 		}
   5126 
   5127 		switch (sc->sc_type) {
   5128 		case WM_T_82571:
   5129 		case WM_T_82572:
   5130 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5131 
   5132 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5133 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5134 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5135 			/* 8257[12] Errata No.7 */
    5136 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5137 
   5138 			/* TARC1 bit 28 */
   5139 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5140 				tarc1 &= ~__BIT(28);
   5141 			else
   5142 				tarc1 |= __BIT(28);
   5143 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5144 
   5145 			/*
   5146 			 * 8257[12] Errata No.13
    5147 			 * Disable Dynamic Clock Gating.
   5148 			 */
   5149 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5150 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5151 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5152 			break;
   5153 		case WM_T_82573:
   5154 		case WM_T_82574:
   5155 		case WM_T_82583:
   5156 			if ((sc->sc_type == WM_T_82574)
   5157 			    || (sc->sc_type == WM_T_82583))
   5158 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5159 
   5160 			/* Extended Device Control */
   5161 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5162 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5163 			reg |= __BIT(22);	/* Set bit 22 */
   5164 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5165 
   5166 			/* Device Control */
   5167 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5168 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5169 
   5170 			/* PCIe Control Register */
   5171 			/*
   5172 			 * 82573 Errata (unknown).
   5173 			 *
   5174 			 * 82574 Errata 25 and 82583 Errata 12
   5175 			 * "Dropped Rx Packets":
    5176 			 *   NVM Image Versions 2.1.4 and newer don't have this bug.
   5177 			 */
   5178 			reg = CSR_READ(sc, WMREG_GCR);
   5179 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5180 			CSR_WRITE(sc, WMREG_GCR, reg);
   5181 
   5182 			if ((sc->sc_type == WM_T_82574)
   5183 			    || (sc->sc_type == WM_T_82583)) {
   5184 				/*
   5185 				 * Document says this bit must be set for
   5186 				 * proper operation.
   5187 				 */
   5188 				reg = CSR_READ(sc, WMREG_GCR);
   5189 				reg |= __BIT(22);
   5190 				CSR_WRITE(sc, WMREG_GCR, reg);
   5191 
   5192 				/*
   5193 				 * Apply workaround for hardware errata
    5194 				 * Apply a workaround for a hardware erratum
    5195 				 * documented in the errata docs.  It fixes an
    5196 				 * issue where some error-prone or unreliable
    5197 				 * PCIe completions occur, particularly with
    5198 				 * ASPM enabled.  Without the fix, the issue
    5199 				 * can cause Tx timeouts.
   5200 				reg = CSR_READ(sc, WMREG_GCR2);
   5201 				reg |= __BIT(0);
   5202 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5203 			}
   5204 			break;
   5205 		case WM_T_80003:
   5206 			/* TARC0 */
   5207 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5208 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5209 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5210 
   5211 			/* TARC1 bit 28 */
   5212 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5213 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5214 				tarc1 &= ~__BIT(28);
   5215 			else
   5216 				tarc1 |= __BIT(28);
   5217 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5218 			break;
   5219 		case WM_T_ICH8:
   5220 		case WM_T_ICH9:
   5221 		case WM_T_ICH10:
   5222 		case WM_T_PCH:
   5223 		case WM_T_PCH2:
   5224 		case WM_T_PCH_LPT:
   5225 		case WM_T_PCH_SPT:
   5226 		case WM_T_PCH_CNP:
   5227 		case WM_T_PCH_TGP:
   5228 			/* TARC0 */
   5229 			if (sc->sc_type == WM_T_ICH8) {
   5230 				/* Set TARC0 bits 29 and 28 */
   5231 				tarc0 |= __BITS(29, 28);
   5232 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5233 				tarc0 |= __BIT(29);
   5234 				/*
    5235 				 * Drop bit 28. From Linux.
   5236 				 * See I218/I219 spec update
   5237 				 * "5. Buffer Overrun While the I219 is
   5238 				 * Processing DMA Transactions"
   5239 				 */
   5240 				tarc0 &= ~__BIT(28);
   5241 			}
   5242 			/* Set TARC0 bits 23,24,26,27 */
   5243 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5244 
   5245 			/* CTRL_EXT */
   5246 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5247 			reg |= __BIT(22);	/* Set bit 22 */
   5248 			/*
   5249 			 * Enable PHY low-power state when MAC is at D3
   5250 			 * w/o WoL
   5251 			 */
   5252 			if (sc->sc_type >= WM_T_PCH)
   5253 				reg |= CTRL_EXT_PHYPDEN;
   5254 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5255 
   5256 			/* TARC1 */
   5257 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5258 			/* bit 28 */
   5259 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5260 				tarc1 &= ~__BIT(28);
   5261 			else
   5262 				tarc1 |= __BIT(28);
   5263 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5264 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5265 
   5266 			/* Device Status */
   5267 			if (sc->sc_type == WM_T_ICH8) {
   5268 				reg = CSR_READ(sc, WMREG_STATUS);
   5269 				reg &= ~__BIT(31);
   5270 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5271 
   5272 			}
   5273 
   5274 			/* IOSFPC */
   5275 			if (sc->sc_type == WM_T_PCH_SPT) {
   5276 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5277 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5278 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5279 			}
   5280 			/*
    5281 			 * To work around a descriptor data corruption issue
    5282 			 * seen with NFS v2 UDP traffic, just disable the NFS
    5283 			 * filtering capability.
   5284 			 */
   5285 			reg = CSR_READ(sc, WMREG_RFCTL);
   5286 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5287 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5288 			break;
   5289 		default:
   5290 			break;
   5291 		}
   5292 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5293 
   5294 		switch (sc->sc_type) {
   5295 		case WM_T_82571:
   5296 		case WM_T_82572:
   5297 		case WM_T_82573:
   5298 		case WM_T_80003:
   5299 		case WM_T_ICH8:
   5300 			/*
   5301 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5302 			 * others to avoid RSS Hash Value bug.
   5303 			 */
   5304 			reg = CSR_READ(sc, WMREG_RFCTL);
    5305 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5306 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5307 			break;
   5308 		case WM_T_82574:
    5309 			/* Use extended Rx descriptors. */
   5310 			reg = CSR_READ(sc, WMREG_RFCTL);
   5311 			reg |= WMREG_RFCTL_EXSTEN;
   5312 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5313 			break;
   5314 		default:
   5315 			break;
   5316 		}
   5317 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5318 		/*
   5319 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5320 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5321 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5322 		 * Correctly by the Device"
   5323 		 *
   5324 		 * I354(C2000) Errata AVR53:
   5325 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5326 		 * Hang"
   5327 		 */
   5328 		reg = CSR_READ(sc, WMREG_RFCTL);
   5329 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5330 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5331 	}
   5332 }
   5333 
   5334 static uint32_t
   5335 wm_rxpbs_adjust_82580(uint32_t val)
   5336 {
   5337 	uint32_t rv = 0;
   5338 
   5339 	if (val < __arraycount(wm_82580_rxpbs_table))
   5340 		rv = wm_82580_rxpbs_table[val];
   5341 
   5342 	return rv;
   5343 }
   5344 
   5345 /*
   5346  * wm_reset_phy:
   5347  *
   5348  *	generic PHY reset function.
   5349  *	Same as e1000_phy_hw_reset_generic()
   5350  */
   5351 static int
   5352 wm_reset_phy(struct wm_softc *sc)
   5353 {
   5354 	uint32_t reg;
   5355 	int rv;
   5356 
   5357 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5358 		device_xname(sc->sc_dev), __func__));
   5359 	if (wm_phy_resetisblocked(sc))
   5360 		return -1;
   5361 
   5362 	rv = sc->phy.acquire(sc);
   5363 	if (rv) {
   5364 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5365 		    __func__, rv);
   5366 		return rv;
   5367 	}
   5368 
   5369 	reg = CSR_READ(sc, WMREG_CTRL);
   5370 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5371 	CSR_WRITE_FLUSH(sc);
   5372 
   5373 	delay(sc->phy.reset_delay_us);
   5374 
   5375 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5376 	CSR_WRITE_FLUSH(sc);
   5377 
   5378 	delay(150);
   5379 
   5380 	sc->phy.release(sc);
   5381 
   5382 	wm_get_cfg_done(sc);
   5383 	wm_phy_post_reset(sc);
   5384 
   5385 	return 0;
   5386 }
   5387 
   5388 /*
   5389  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5390  *
   5391  * In i219, the descriptor rings must be emptied before resetting the HW
   5392  * or before changing the device state to D3 during runtime (runtime PM).
   5393  *
   5394  * Failure to do this will cause the HW to enter a unit hang state which can
   5395  * only be released by PCI reset on the device.
   5396  *
   5397  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5398  */
   5399 static void
   5400 wm_flush_desc_rings(struct wm_softc *sc)
   5401 {
   5402 	pcireg_t preg;
   5403 	uint32_t reg;
   5404 	struct wm_txqueue *txq;
   5405 	wiseman_txdesc_t *txd;
   5406 	int nexttx;
   5407 	uint32_t rctl;
   5408 
   5409 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5410 
   5411 	/* First, disable MULR fix in FEXTNVM11 */
   5412 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5413 	reg |= FEXTNVM11_DIS_MULRFIX;
   5414 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5415 
   5416 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5417 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5418 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5419 		return;
   5420 
   5421 	/*
   5422 	 * Remove all descriptors from the tx_ring.
   5423 	 *
   5424 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5425 	 * happens when the HW reads the regs. We assign the ring itself as
    5426 	 * the data of the next descriptor. We don't care about the data; we
    5427 	 * are about to reset the HW anyway.
   5428 	 */
   5429 #ifdef WM_DEBUG
   5430 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5431 #endif
   5432 	reg = CSR_READ(sc, WMREG_TCTL);
   5433 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5434 
   5435 	txq = &sc->sc_queue[0].wmq_txq;
   5436 	nexttx = txq->txq_next;
   5437 	txd = &txq->txq_descs[nexttx];
   5438 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5439 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5440 	txd->wtx_fields.wtxu_status = 0;
   5441 	txd->wtx_fields.wtxu_options = 0;
   5442 	txd->wtx_fields.wtxu_vlan = 0;
   5443 
   5444 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5445 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5446 
   5447 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5448 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5449 	CSR_WRITE_FLUSH(sc);
   5450 	delay(250);
   5451 
   5452 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5453 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5454 		return;
   5455 
   5456 	/*
   5457 	 * Mark all descriptors in the RX ring as consumed and disable the
   5458 	 * rx ring.
   5459 	 */
   5460 #ifdef WM_DEBUG
   5461 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5462 #endif
   5463 	rctl = CSR_READ(sc, WMREG_RCTL);
   5464 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5465 	CSR_WRITE_FLUSH(sc);
   5466 	delay(150);
   5467 
   5468 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5469 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5470 	reg &= 0xffffc000;
   5471 	/*
   5472 	 * Update thresholds: prefetch threshold to 31, host threshold
   5473 	 * to 1 and make sure the granularity is "descriptors" and not
   5474 	 * "cache lines"
   5475 	 */
   5476 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5477 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5478 
   5479 	/* Momentarily enable the RX ring for the changes to take effect */
   5480 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5481 	CSR_WRITE_FLUSH(sc);
   5482 	delay(150);
   5483 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5484 }
   5485 
   5486 /*
   5487  * wm_reset:
   5488  *
   5489  *	Reset the i82542 chip.
   5490  */
   5491 static void
   5492 wm_reset(struct wm_softc *sc)
   5493 {
   5494 	int phy_reset = 0;
   5495 	int i, error = 0;
   5496 	uint32_t reg;
   5497 	uint16_t kmreg;
   5498 	int rv;
   5499 
   5500 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5501 		device_xname(sc->sc_dev), __func__));
   5502 	KASSERT(sc->sc_type != 0);
   5503 
   5504 	/*
   5505 	 * Allocate on-chip memory according to the MTU size.
   5506 	 * The Packet Buffer Allocation register must be written
   5507 	 * before the chip is reset.
   5508 	 */
   5509 	switch (sc->sc_type) {
   5510 	case WM_T_82547:
   5511 	case WM_T_82547_2:
   5512 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5513 		    PBA_22K : PBA_30K;
   5514 		for (i = 0; i < sc->sc_nqueues; i++) {
   5515 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5516 			txq->txq_fifo_head = 0;
   5517 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5518 			txq->txq_fifo_size =
   5519 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5520 			txq->txq_fifo_stall = 0;
   5521 		}
   5522 		break;
   5523 	case WM_T_82571:
   5524 	case WM_T_82572:
    5525 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5526 	case WM_T_80003:
   5527 		sc->sc_pba = PBA_32K;
   5528 		break;
   5529 	case WM_T_82573:
   5530 		sc->sc_pba = PBA_12K;
   5531 		break;
   5532 	case WM_T_82574:
   5533 	case WM_T_82583:
   5534 		sc->sc_pba = PBA_20K;
   5535 		break;
   5536 	case WM_T_82576:
   5537 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5538 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5539 		break;
   5540 	case WM_T_82580:
   5541 	case WM_T_I350:
   5542 	case WM_T_I354:
   5543 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5544 		break;
   5545 	case WM_T_I210:
   5546 	case WM_T_I211:
   5547 		sc->sc_pba = PBA_34K;
   5548 		break;
   5549 	case WM_T_ICH8:
   5550 		/* Workaround for a bit corruption issue in FIFO memory */
   5551 		sc->sc_pba = PBA_8K;
   5552 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5553 		break;
   5554 	case WM_T_ICH9:
   5555 	case WM_T_ICH10:
   5556 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5557 		    PBA_14K : PBA_10K;
   5558 		break;
   5559 	case WM_T_PCH:
   5560 	case WM_T_PCH2:	/* XXX 14K? */
   5561 	case WM_T_PCH_LPT:
   5562 	case WM_T_PCH_SPT:
   5563 	case WM_T_PCH_CNP:
   5564 	case WM_T_PCH_TGP:
   5565 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5566 		    PBA_12K : PBA_26K;
   5567 		break;
   5568 	default:
   5569 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5570 		    PBA_40K : PBA_48K;
   5571 		break;
   5572 	}
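         	/*
         	 * Worked example (illustrative, assuming the PBA_xxK constants
         	 * count KB): an 82547 with an MTU over 8192 gets sc_pba =
         	 * PBA_22K, i.e. a 22 KB Rx packet buffer, and the loop above
         	 * then sizes the Tx FIFO at (PBA_40K - PBA_22K) = 18 KB.
         	 */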
   5573 	/*
    5574 	 * Only old or non-multiqueue devices have the PBA register.
   5575 	 * XXX Need special handling for 82575.
   5576 	 */
   5577 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5578 	    || (sc->sc_type == WM_T_82575))
   5579 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5580 
   5581 	/* Prevent the PCI-E bus from sticking */
   5582 	if (sc->sc_flags & WM_F_PCIE) {
   5583 		int timeout = 800;
   5584 
   5585 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5586 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5587 
   5588 		while (timeout--) {
   5589 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5590 			    == 0)
   5591 				break;
   5592 			delay(100);
   5593 		}
   5594 		if (timeout == 0)
   5595 			device_printf(sc->sc_dev,
   5596 			    "failed to disable bus mastering\n");
   5597 	}
   5598 
   5599 	/* Set the completion timeout for interface */
   5600 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5601 	    || (sc->sc_type == WM_T_82580)
   5602 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5603 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5604 		wm_set_pcie_completion_timeout(sc);
   5605 
   5606 	/* Clear interrupt */
   5607 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5608 	if (wm_is_using_msix(sc)) {
   5609 		if (sc->sc_type != WM_T_82574) {
   5610 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5611 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5612 		} else
   5613 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5614 	}
   5615 
   5616 	/* Stop the transmit and receive processes. */
   5617 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5618 	sc->sc_rctl &= ~RCTL_EN;
   5619 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5620 	CSR_WRITE_FLUSH(sc);
   5621 
   5622 	/* XXX set_tbi_sbp_82543() */
   5623 
   5624 	delay(10*1000);
   5625 
   5626 	/* Must acquire the MDIO ownership before MAC reset */
   5627 	switch (sc->sc_type) {
   5628 	case WM_T_82573:
   5629 	case WM_T_82574:
   5630 	case WM_T_82583:
   5631 		error = wm_get_hw_semaphore_82573(sc);
   5632 		break;
   5633 	default:
   5634 		break;
   5635 	}
   5636 
   5637 	/*
   5638 	 * 82541 Errata 29? & 82547 Errata 28?
   5639 	 * See also the description about PHY_RST bit in CTRL register
   5640 	 * in 8254x_GBe_SDM.pdf.
   5641 	 */
   5642 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5643 		CSR_WRITE(sc, WMREG_CTRL,
   5644 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5645 		CSR_WRITE_FLUSH(sc);
   5646 		delay(5000);
   5647 	}
   5648 
   5649 	switch (sc->sc_type) {
   5650 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5651 	case WM_T_82541:
   5652 	case WM_T_82541_2:
   5653 	case WM_T_82547:
   5654 	case WM_T_82547_2:
   5655 		/*
   5656 		 * On some chipsets, a reset through a memory-mapped write
   5657 		 * cycle can cause the chip to reset before completing the
    5658 		 * write cycle. This causes major headaches that can be avoided
   5659 		 * by issuing the reset via indirect register writes through
   5660 		 * I/O space.
   5661 		 *
   5662 		 * So, if we successfully mapped the I/O BAR at attach time,
   5663 		 * use that. Otherwise, try our luck with a memory-mapped
   5664 		 * reset.
   5665 		 */
   5666 		if (sc->sc_flags & WM_F_IOH_VALID)
   5667 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5668 		else
   5669 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5670 		break;
   5671 	case WM_T_82545_3:
   5672 	case WM_T_82546_3:
   5673 		/* Use the shadow control register on these chips. */
   5674 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5675 		break;
   5676 	case WM_T_80003:
   5677 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5678 		if (sc->phy.acquire(sc) != 0)
   5679 			break;
   5680 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5681 		sc->phy.release(sc);
   5682 		break;
   5683 	case WM_T_ICH8:
   5684 	case WM_T_ICH9:
   5685 	case WM_T_ICH10:
   5686 	case WM_T_PCH:
   5687 	case WM_T_PCH2:
   5688 	case WM_T_PCH_LPT:
   5689 	case WM_T_PCH_SPT:
   5690 	case WM_T_PCH_CNP:
   5691 	case WM_T_PCH_TGP:
   5692 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5693 		if (wm_phy_resetisblocked(sc) == false) {
   5694 			/*
   5695 			 * Gate automatic PHY configuration by hardware on
   5696 			 * non-managed 82579
   5697 			 */
   5698 			if ((sc->sc_type == WM_T_PCH2)
   5699 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5700 				== 0))
   5701 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5702 
   5703 			reg |= CTRL_PHY_RESET;
   5704 			phy_reset = 1;
   5705 		} else
   5706 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5707 		if (sc->phy.acquire(sc) != 0)
   5708 			break;
   5709 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5710 		/* Don't insert a completion barrier during reset */
   5711 		delay(20*1000);
   5712 		/*
   5713 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5714 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5715 		 * only. See also wm_get_swflag_ich8lan().
   5716 		 */
   5717 		mutex_exit(sc->sc_ich_phymtx);
   5718 		break;
   5719 	case WM_T_82580:
   5720 	case WM_T_I350:
   5721 	case WM_T_I354:
   5722 	case WM_T_I210:
   5723 	case WM_T_I211:
   5724 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5725 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5726 			CSR_WRITE_FLUSH(sc);
   5727 		delay(5000);
   5728 		break;
   5729 	case WM_T_82542_2_0:
   5730 	case WM_T_82542_2_1:
   5731 	case WM_T_82543:
   5732 	case WM_T_82540:
   5733 	case WM_T_82545:
   5734 	case WM_T_82546:
   5735 	case WM_T_82571:
   5736 	case WM_T_82572:
   5737 	case WM_T_82573:
   5738 	case WM_T_82574:
   5739 	case WM_T_82575:
   5740 	case WM_T_82576:
   5741 	case WM_T_82583:
   5742 	default:
   5743 		/* Everything else can safely use the documented method. */
   5744 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5745 		break;
   5746 	}
   5747 
   5748 	/* Must release the MDIO ownership after MAC reset */
   5749 	switch (sc->sc_type) {
   5750 	case WM_T_82573:
   5751 	case WM_T_82574:
   5752 	case WM_T_82583:
   5753 		if (error == 0)
   5754 			wm_put_hw_semaphore_82573(sc);
   5755 		break;
   5756 	default:
   5757 		break;
   5758 	}
   5759 
   5760 	/* Set Phy Config Counter to 50msec */
   5761 	if (sc->sc_type == WM_T_PCH2) {
   5762 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5763 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5764 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5765 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5766 	}
   5767 
   5768 	if (phy_reset != 0)
   5769 		wm_get_cfg_done(sc);
   5770 
   5771 	/* Reload EEPROM */
   5772 	switch (sc->sc_type) {
   5773 	case WM_T_82542_2_0:
   5774 	case WM_T_82542_2_1:
   5775 	case WM_T_82543:
   5776 	case WM_T_82544:
   5777 		delay(10);
   5778 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5779 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5780 		CSR_WRITE_FLUSH(sc);
   5781 		delay(2000);
   5782 		break;
   5783 	case WM_T_82540:
   5784 	case WM_T_82545:
   5785 	case WM_T_82545_3:
   5786 	case WM_T_82546:
   5787 	case WM_T_82546_3:
   5788 		delay(5*1000);
   5789 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5790 		break;
   5791 	case WM_T_82541:
   5792 	case WM_T_82541_2:
   5793 	case WM_T_82547:
   5794 	case WM_T_82547_2:
   5795 		delay(20000);
   5796 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5797 		break;
   5798 	case WM_T_82571:
   5799 	case WM_T_82572:
   5800 	case WM_T_82573:
   5801 	case WM_T_82574:
   5802 	case WM_T_82583:
   5803 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5804 			delay(10);
   5805 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5806 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5807 			CSR_WRITE_FLUSH(sc);
   5808 		}
   5809 		/* check EECD_EE_AUTORD */
   5810 		wm_get_auto_rd_done(sc);
   5811 		/*
    5812 		 * PHY configuration from the NVM starts right after EECD_AUTO_RD
   5813 		 * is set.
   5814 		 */
   5815 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5816 		    || (sc->sc_type == WM_T_82583))
   5817 			delay(25*1000);
   5818 		break;
   5819 	case WM_T_82575:
   5820 	case WM_T_82576:
   5821 	case WM_T_82580:
   5822 	case WM_T_I350:
   5823 	case WM_T_I354:
   5824 	case WM_T_I210:
   5825 	case WM_T_I211:
   5826 	case WM_T_80003:
   5827 		/* check EECD_EE_AUTORD */
   5828 		wm_get_auto_rd_done(sc);
   5829 		break;
   5830 	case WM_T_ICH8:
   5831 	case WM_T_ICH9:
   5832 	case WM_T_ICH10:
   5833 	case WM_T_PCH:
   5834 	case WM_T_PCH2:
   5835 	case WM_T_PCH_LPT:
   5836 	case WM_T_PCH_SPT:
   5837 	case WM_T_PCH_CNP:
   5838 	case WM_T_PCH_TGP:
   5839 		break;
   5840 	default:
   5841 		panic("%s: unknown type\n", __func__);
   5842 	}
   5843 
   5844 	/* Check whether EEPROM is present or not */
   5845 	switch (sc->sc_type) {
   5846 	case WM_T_82575:
   5847 	case WM_T_82576:
   5848 	case WM_T_82580:
   5849 	case WM_T_I350:
   5850 	case WM_T_I354:
   5851 	case WM_T_ICH8:
   5852 	case WM_T_ICH9:
   5853 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5854 			/* Not found */
   5855 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5856 			if (sc->sc_type == WM_T_82575)
   5857 				wm_reset_init_script_82575(sc);
   5858 		}
   5859 		break;
   5860 	default:
   5861 		break;
   5862 	}
   5863 
   5864 	if (phy_reset != 0)
   5865 		wm_phy_post_reset(sc);
   5866 
   5867 	if ((sc->sc_type == WM_T_82580)
   5868 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5869 		/* Clear global device reset status bit */
   5870 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5871 	}
   5872 
   5873 	/* Clear any pending interrupt events. */
   5874 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5875 	reg = CSR_READ(sc, WMREG_ICR);
   5876 	if (wm_is_using_msix(sc)) {
   5877 		if (sc->sc_type != WM_T_82574) {
   5878 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5879 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5880 		} else
   5881 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5882 	}
   5883 
   5884 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5885 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5886 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5887 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   5888 	    || (sc->sc_type == WM_T_PCH_TGP)) {
   5889 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5890 		reg |= KABGTXD_BGSQLBIAS;
   5891 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5892 	}
   5893 
   5894 	/* Reload sc_ctrl */
   5895 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5896 
   5897 	wm_set_eee(sc);
   5898 
   5899 	/*
   5900 	 * For PCH, this write will make sure that any noise will be detected
   5901 	 * as a CRC error and be dropped rather than show up as a bad packet
   5902 	 * to the DMA engine
   5903 	 */
   5904 	if (sc->sc_type == WM_T_PCH)
   5905 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5906 
   5907 	if (sc->sc_type >= WM_T_82544)
   5908 		CSR_WRITE(sc, WMREG_WUC, 0);
   5909 
   5910 	if (sc->sc_type < WM_T_82575)
   5911 		wm_disable_aspm(sc); /* Workaround for some chips */
   5912 
   5913 	wm_reset_mdicnfg_82580(sc);
   5914 
   5915 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5916 		wm_pll_workaround_i210(sc);
   5917 
   5918 	if (sc->sc_type == WM_T_80003) {
   5919 		/* Default to TRUE to enable the MDIC W/A */
   5920 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5921 
   5922 		rv = wm_kmrn_readreg(sc,
   5923 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5924 		if (rv == 0) {
   5925 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5926 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5927 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5928 			else
   5929 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5930 		}
   5931 	}
   5932 }
   5933 
   5934 /*
   5935  * wm_add_rxbuf:
   5936  *
    5937  *	Add a receive buffer to the indicated descriptor.
   5938  */
   5939 static int
   5940 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5941 {
   5942 	struct wm_softc *sc = rxq->rxq_sc;
   5943 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5944 	struct mbuf *m;
   5945 	int error;
   5946 
   5947 	KASSERT(mutex_owned(rxq->rxq_lock));
   5948 
   5949 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5950 	if (m == NULL)
   5951 		return ENOBUFS;
   5952 
   5953 	MCLGET(m, M_DONTWAIT);
   5954 	if ((m->m_flags & M_EXT) == 0) {
   5955 		m_freem(m);
   5956 		return ENOBUFS;
   5957 	}
   5958 
   5959 	if (rxs->rxs_mbuf != NULL)
   5960 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5961 
   5962 	rxs->rxs_mbuf = m;
   5963 
   5964 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5965 	/*
   5966 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5967 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5968 	 */
   5969 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5970 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5971 	if (error) {
   5972 		/* XXX XXX XXX */
   5973 		aprint_error_dev(sc->sc_dev,
   5974 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5975 		panic("wm_add_rxbuf");
   5976 	}
   5977 
   5978 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5979 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5980 
   5981 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5982 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5983 			wm_init_rxdesc(rxq, idx);
   5984 	} else
   5985 		wm_init_rxdesc(rxq, idx);
   5986 
   5987 	return 0;
   5988 }
   5989 
   5990 /*
   5991  * wm_rxdrain:
   5992  *
   5993  *	Drain the receive queue.
   5994  */
   5995 static void
   5996 wm_rxdrain(struct wm_rxqueue *rxq)
   5997 {
   5998 	struct wm_softc *sc = rxq->rxq_sc;
   5999 	struct wm_rxsoft *rxs;
   6000 	int i;
   6001 
   6002 	KASSERT(mutex_owned(rxq->rxq_lock));
   6003 
   6004 	for (i = 0; i < WM_NRXDESC; i++) {
   6005 		rxs = &rxq->rxq_soft[i];
   6006 		if (rxs->rxs_mbuf != NULL) {
   6007 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   6008 			m_freem(rxs->rxs_mbuf);
   6009 			rxs->rxs_mbuf = NULL;
   6010 		}
   6011 	}
   6012 }
   6013 
   6014 /*
   6015  * Setup registers for RSS.
   6016  *
    6017  * XXX VMDq is not yet supported.
   6018  */
   6019 static void
   6020 wm_init_rss(struct wm_softc *sc)
   6021 {
   6022 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   6023 	int i;
   6024 
   6025 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   6026 
   6027 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   6028 		unsigned int qid, reta_ent;
   6029 
   6030 		qid  = i % sc->sc_nqueues;
   6031 		switch (sc->sc_type) {
   6032 		case WM_T_82574:
   6033 			reta_ent = __SHIFTIN(qid,
   6034 			    RETA_ENT_QINDEX_MASK_82574);
   6035 			break;
   6036 		case WM_T_82575:
   6037 			reta_ent = __SHIFTIN(qid,
   6038 			    RETA_ENT_QINDEX1_MASK_82575);
   6039 			break;
   6040 		default:
   6041 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   6042 			break;
   6043 		}
   6044 
   6045 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   6046 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   6047 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   6048 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6049 	}
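         	/*
         	 * The loop above fills the table round-robin (qid = i %
         	 * sc->sc_nqueues); with e.g. sc_nqueues == 4 the entries read
         	 * 0,1,2,3,0,1,..., spreading RSS hash values evenly across
         	 * the four queues.
         	 */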
   6050 
   6051 	rss_getkey((uint8_t *)rss_key);
   6052 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6053 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6054 
   6055 	if (sc->sc_type == WM_T_82574)
   6056 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6057 	else
   6058 		mrqc = MRQC_ENABLE_RSS_MQ;
   6059 
   6060 	/*
    6061 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   6062 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6063 	 */
   6064 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6065 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6066 #if 0
   6067 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6068 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6069 #endif
   6070 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6071 
   6072 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6073 }
   6074 
   6075 /*
    6076  * Adjust the TX and RX queue numbers which the system actually uses.
    6077  *
    6078  * The numbers are affected by the following parameters:
    6079  *     - The number of hardware queues
   6080  *     - The number of MSI-X vectors (= "nvectors" argument)
   6081  *     - ncpu
   6082  */
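         /*
          * For example (illustrative figures): an 82576 exposes 16 queue
          * pairs; probed with nvectors == 5 on an 8-CPU machine, one vector
          * is reserved for the link interrupt, so the clamps below give
          * sc_nqueues = MIN(16, 5 - 1, 8) = 4.
          */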
   6083 static void
   6084 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6085 {
   6086 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6087 
   6088 	if (nvectors < 2) {
   6089 		sc->sc_nqueues = 1;
   6090 		return;
   6091 	}
   6092 
   6093 	switch (sc->sc_type) {
   6094 	case WM_T_82572:
   6095 		hw_ntxqueues = 2;
   6096 		hw_nrxqueues = 2;
   6097 		break;
   6098 	case WM_T_82574:
   6099 		hw_ntxqueues = 2;
   6100 		hw_nrxqueues = 2;
   6101 		break;
   6102 	case WM_T_82575:
   6103 		hw_ntxqueues = 4;
   6104 		hw_nrxqueues = 4;
   6105 		break;
   6106 	case WM_T_82576:
   6107 		hw_ntxqueues = 16;
   6108 		hw_nrxqueues = 16;
   6109 		break;
   6110 	case WM_T_82580:
   6111 	case WM_T_I350:
   6112 	case WM_T_I354:
   6113 		hw_ntxqueues = 8;
   6114 		hw_nrxqueues = 8;
   6115 		break;
   6116 	case WM_T_I210:
   6117 		hw_ntxqueues = 4;
   6118 		hw_nrxqueues = 4;
   6119 		break;
   6120 	case WM_T_I211:
   6121 		hw_ntxqueues = 2;
   6122 		hw_nrxqueues = 2;
   6123 		break;
   6124 		/*
   6125 		 * The below Ethernet controllers do not support MSI-X;
   6126 		 * this driver doesn't let them use multiqueue.
   6127 		 *     - WM_T_80003
   6128 		 *     - WM_T_ICH8
   6129 		 *     - WM_T_ICH9
   6130 		 *     - WM_T_ICH10
   6131 		 *     - WM_T_PCH
   6132 		 *     - WM_T_PCH2
   6133 		 *     - WM_T_PCH_LPT
   6134 		 */
   6135 	default:
   6136 		hw_ntxqueues = 1;
   6137 		hw_nrxqueues = 1;
   6138 		break;
   6139 	}
   6140 
   6141 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6142 
   6143 	/*
    6144 	 * Since more queues than MSI-X vectors cannot improve scaling,
    6145 	 * limit the number of queues actually used.
   6146 	 */
   6147 	if (nvectors < hw_nqueues + 1)
   6148 		sc->sc_nqueues = nvectors - 1;
   6149 	else
   6150 		sc->sc_nqueues = hw_nqueues;
   6151 
   6152 	/*
    6153 	 * Since more queues than CPUs cannot improve scaling, limit
    6154 	 * the number of queues actually used.
   6155 	 */
   6156 	if (ncpu < sc->sc_nqueues)
   6157 		sc->sc_nqueues = ncpu;
   6158 }
   6159 
   6160 static inline bool
   6161 wm_is_using_msix(struct wm_softc *sc)
   6162 {
   6163 
   6164 	return (sc->sc_nintrs > 1);
   6165 }
   6166 
   6167 static inline bool
   6168 wm_is_using_multiqueue(struct wm_softc *sc)
   6169 {
   6170 
   6171 	return (sc->sc_nqueues > 1);
   6172 }
   6173 
   6174 static int
   6175 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6176 {
   6177 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6178 
   6179 	wmq->wmq_id = qidx;
   6180 	wmq->wmq_intr_idx = intr_idx;
   6181 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6182 	    wm_handle_queue, wmq);
   6183 	if (wmq->wmq_si != NULL)
   6184 		return 0;
   6185 
   6186 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6187 	    wmq->wmq_id);
   6188 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6189 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6190 	return ENOMEM;
   6191 }
   6192 
   6193 /*
   6194  * Both single interrupt MSI and INTx can use this function.
   6195  */
   6196 static int
   6197 wm_setup_legacy(struct wm_softc *sc)
   6198 {
   6199 	pci_chipset_tag_t pc = sc->sc_pc;
   6200 	const char *intrstr = NULL;
   6201 	char intrbuf[PCI_INTRSTR_LEN];
   6202 	int error;
   6203 
   6204 	error = wm_alloc_txrx_queues(sc);
   6205 	if (error) {
   6206 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6207 		    error);
   6208 		return ENOMEM;
   6209 	}
   6210 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6211 	    sizeof(intrbuf));
   6212 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6213 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6214 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6215 	if (sc->sc_ihs[0] == NULL) {
   6216 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6217 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6218 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6219 		return ENOMEM;
   6220 	}
   6221 
   6222 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6223 	sc->sc_nintrs = 1;
   6224 
   6225 	return wm_softint_establish_queue(sc, 0, 0);
   6226 }
   6227 
   6228 static int
   6229 wm_setup_msix(struct wm_softc *sc)
   6230 {
   6231 	void *vih;
   6232 	kcpuset_t *affinity;
   6233 	int qidx, error, intr_idx, txrx_established;
   6234 	pci_chipset_tag_t pc = sc->sc_pc;
   6235 	const char *intrstr = NULL;
   6236 	char intrbuf[PCI_INTRSTR_LEN];
   6237 	char intr_xname[INTRDEVNAMEBUF];
   6238 
   6239 	if (sc->sc_nqueues < ncpu) {
   6240 		/*
    6241 		 * To avoid colliding with other devices' interrupts, the
    6242 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   6243 		 */
   6244 		sc->sc_affinity_offset = 1;
   6245 	} else {
   6246 		/*
    6247 		 * In this case, this device uses all CPUs, so we unify the
    6248 		 * affinity cpu_index with the MSI-X vector number for readability.
   6249 		 */
   6250 		sc->sc_affinity_offset = 0;
   6251 	}
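         	/*
         	 * For example (illustrative): with 4 queues on an 8-CPU system,
         	 * the TXRX0..TXRX3 vectors below land on CPU1..CPU4 and the
         	 * link interrupt keeps its default affinity.
         	 */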
   6252 
   6253 	error = wm_alloc_txrx_queues(sc);
   6254 	if (error) {
   6255 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6256 		    error);
   6257 		return ENOMEM;
   6258 	}
   6259 
   6260 	kcpuset_create(&affinity, false);
   6261 	intr_idx = 0;
   6262 
   6263 	/*
   6264 	 * TX and RX
   6265 	 */
   6266 	txrx_established = 0;
   6267 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6268 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6269 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6270 
   6271 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6272 		    sizeof(intrbuf));
   6273 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6274 		    PCI_INTR_MPSAFE, true);
   6275 		memset(intr_xname, 0, sizeof(intr_xname));
   6276 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6277 		    device_xname(sc->sc_dev), qidx);
   6278 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6279 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6280 		if (vih == NULL) {
   6281 			aprint_error_dev(sc->sc_dev,
   6282 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6283 			    intrstr ? " at " : "",
   6284 			    intrstr ? intrstr : "");
   6285 
   6286 			goto fail;
   6287 		}
   6288 		kcpuset_zero(affinity);
   6289 		/* Round-robin affinity */
   6290 		kcpuset_set(affinity, affinity_to);
   6291 		error = interrupt_distribute(vih, affinity, NULL);
   6292 		if (error == 0) {
   6293 			aprint_normal_dev(sc->sc_dev,
   6294 			    "for TX and RX interrupting at %s affinity to %u\n",
   6295 			    intrstr, affinity_to);
   6296 		} else {
   6297 			aprint_normal_dev(sc->sc_dev,
   6298 			    "for TX and RX interrupting at %s\n", intrstr);
   6299 		}
   6300 		sc->sc_ihs[intr_idx] = vih;
   6301 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6302 			goto fail;
   6303 		txrx_established++;
   6304 		intr_idx++;
   6305 	}
   6306 
   6307 	/* LINK */
   6308 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6309 	    sizeof(intrbuf));
   6310 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6311 	memset(intr_xname, 0, sizeof(intr_xname));
   6312 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6313 	    device_xname(sc->sc_dev));
   6314 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6315 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6316 	if (vih == NULL) {
   6317 		aprint_error_dev(sc->sc_dev,
   6318 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6319 		    intrstr ? " at " : "",
   6320 		    intrstr ? intrstr : "");
   6321 
   6322 		goto fail;
   6323 	}
   6324 	/* Keep default affinity to LINK interrupt */
   6325 	aprint_normal_dev(sc->sc_dev,
   6326 	    "for LINK interrupting at %s\n", intrstr);
   6327 	sc->sc_ihs[intr_idx] = vih;
   6328 	sc->sc_link_intr_idx = intr_idx;
   6329 
   6330 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6331 	kcpuset_destroy(affinity);
   6332 	return 0;
   6333 
   6334 fail:
   6335 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6336 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6337 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6338 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6339 	}
   6340 
   6341 	kcpuset_destroy(affinity);
   6342 	return ENOMEM;
   6343 }
   6344 
   6345 static void
   6346 wm_unset_stopping_flags(struct wm_softc *sc)
   6347 {
   6348 	int i;
   6349 
   6350 	KASSERT(mutex_owned(sc->sc_core_lock));
   6351 
   6352 	/* Must unset stopping flags in ascending order. */
   6353 	for (i = 0; i < sc->sc_nqueues; i++) {
   6354 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6355 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6356 
   6357 		mutex_enter(txq->txq_lock);
   6358 		txq->txq_stopping = false;
   6359 		mutex_exit(txq->txq_lock);
   6360 
   6361 		mutex_enter(rxq->rxq_lock);
   6362 		rxq->rxq_stopping = false;
   6363 		mutex_exit(rxq->rxq_lock);
   6364 	}
   6365 
   6366 	sc->sc_core_stopping = false;
   6367 }
   6368 
   6369 static void
   6370 wm_set_stopping_flags(struct wm_softc *sc)
   6371 {
   6372 	int i;
   6373 
   6374 	KASSERT(mutex_owned(sc->sc_core_lock));
   6375 
   6376 	sc->sc_core_stopping = true;
   6377 
   6378 	/* Must set stopping flags in ascending order. */
   6379 	for (i = 0; i < sc->sc_nqueues; i++) {
   6380 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6381 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6382 
   6383 		mutex_enter(rxq->rxq_lock);
   6384 		rxq->rxq_stopping = true;
   6385 		mutex_exit(rxq->rxq_lock);
   6386 
   6387 		mutex_enter(txq->txq_lock);
   6388 		txq->txq_stopping = true;
   6389 		mutex_exit(txq->txq_lock);
   6390 	}
   6391 }
   6392 
   6393 /*
    6394  * Write the interrupt interval value to the ITR or EITR register.
   6395  */
   6396 static void
   6397 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6398 {
   6399 
   6400 	if (!wmq->wmq_set_itr)
   6401 		return;
   6402 
   6403 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6404 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6405 
   6406 		/*
    6407 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    6408 		 * the counter field in software.
   6409 		 */
   6410 		if (sc->sc_type == WM_T_82575)
   6411 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6412 			    EITR_COUNTER_MASK_82575);
   6413 		else
   6414 			eitr |= EITR_CNT_INGR;
   6415 
   6416 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6417 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6418 		/*
    6419 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6420 		 * the multiqueue function with MSI-X.
   6421 		 */
   6422 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6423 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6424 	} else {
   6425 		KASSERT(wmq->wmq_id == 0);
   6426 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6427 	}
   6428 
   6429 	wmq->wmq_set_itr = false;
   6430 }
   6431 
   6432 /*
   6433  * TODO
    6434  * The dynamic itr calculation below is almost the same as Linux igb's,
    6435  * but it does not fit wm(4) well. AIM stays disabled until we find an
    6436  * appropriate itr calculation.
   6437  */
   6438 /*
    6439  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6440  * write to the register. This function itself does not write ITR/EITR.
   6441  */
   6442 static void
   6443 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6444 {
   6445 #ifdef NOTYET
   6446 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6447 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6448 	uint32_t avg_size = 0;
   6449 	uint32_t new_itr;
   6450 
   6451 	if (rxq->rxq_packets)
   6452 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6453 	if (txq->txq_packets)
   6454 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6455 
   6456 	if (avg_size == 0) {
   6457 		new_itr = 450; /* restore default value */
   6458 		goto out;
   6459 	}
   6460 
   6461 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6462 	avg_size += 24;
   6463 
   6464 	/* Don't starve jumbo frames */
   6465 	avg_size = uimin(avg_size, 3000);
   6466 
   6467 	/* Give a little boost to mid-size frames */
   6468 	if ((avg_size > 300) && (avg_size < 1200))
   6469 		new_itr = avg_size / 3;
   6470 	else
   6471 		new_itr = avg_size / 2;
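         	/*
         	 * Worked example (illustrative): full-size 1518-byte frames give
         	 * avg_size = 1518 + 24 = 1542, hence new_itr = 1542 / 2 = 771;
         	 * minimum 64-byte frames give avg_size = 88 and new_itr = 44.
         	 */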
   6472 
   6473 out:
   6474 	/*
    6475 	 * The 82574's and 82575's EITR usage differs from other NEWQUEUE
    6476 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
   6477 	 */
   6478 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6479 		new_itr *= 4;
   6480 
   6481 	if (new_itr != wmq->wmq_itr) {
   6482 		wmq->wmq_itr = new_itr;
   6483 		wmq->wmq_set_itr = true;
   6484 	} else
   6485 		wmq->wmq_set_itr = false;
   6486 
   6487 	rxq->rxq_packets = 0;
   6488 	rxq->rxq_bytes = 0;
   6489 	txq->txq_packets = 0;
   6490 	txq->txq_bytes = 0;
   6491 #endif
   6492 }
   6493 
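         /*
          * Create the per-device sysctl tree. The nodes appear under
          * hw.<ifname>, e.g. hw.wm0.txrx_workqueue and hw.wm0.q0.txq_free.
          */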
   6494 static void
   6495 wm_init_sysctls(struct wm_softc *sc)
   6496 {
   6497 	struct sysctllog **log;
   6498 	const struct sysctlnode *rnode, *qnode, *cnode;
   6499 	int i, rv;
   6500 	const char *dvname;
   6501 
   6502 	log = &sc->sc_sysctllog;
   6503 	dvname = device_xname(sc->sc_dev);
   6504 
   6505 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6506 	    0, CTLTYPE_NODE, dvname,
   6507 	    SYSCTL_DESCR("wm information and settings"),
   6508 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6509 	if (rv != 0)
   6510 		goto err;
   6511 
   6512 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6513 	    CTLTYPE_BOOL, "txrx_workqueue",
   6514 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6515 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6516 	if (rv != 0)
   6517 		goto teardown;
   6518 
   6519 	for (i = 0; i < sc->sc_nqueues; i++) {
   6520 		struct wm_queue *wmq = &sc->sc_queue[i];
   6521 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6522 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6523 
   6524 		snprintf(sc->sc_queue[i].sysctlname,
   6525 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6526 
   6527 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6528 		    0, CTLTYPE_NODE,
   6529 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6530 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6531 			break;
   6532 
   6533 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6534 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6535 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6536 		    NULL, 0, &txq->txq_free,
   6537 		    0, CTL_CREATE, CTL_EOL) != 0)
   6538 			break;
   6539 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6540 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6541 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6542 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6543 		    0, CTL_CREATE, CTL_EOL) != 0)
   6544 			break;
   6545 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6546 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6547 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6548 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6549 		    0, CTL_CREATE, CTL_EOL) != 0)
   6550 			break;
   6551 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6552 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6553 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6554 		    NULL, 0, &txq->txq_next,
   6555 		    0, CTL_CREATE, CTL_EOL) != 0)
   6556 			break;
   6557 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6558 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6559 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6560 		    NULL, 0, &txq->txq_sfree,
   6561 		    0, CTL_CREATE, CTL_EOL) != 0)
   6562 			break;
   6563 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6564 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6565 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6566 		    NULL, 0, &txq->txq_snext,
   6567 		    0, CTL_CREATE, CTL_EOL) != 0)
   6568 			break;
   6569 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6570 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6571 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6572 		    NULL, 0, &txq->txq_sdirty,
   6573 		    0, CTL_CREATE, CTL_EOL) != 0)
   6574 			break;
   6575 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6576 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6577 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6578 		    NULL, 0, &txq->txq_flags,
   6579 		    0, CTL_CREATE, CTL_EOL) != 0)
   6580 			break;
   6581 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6582 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6583 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6584 		    NULL, 0, &txq->txq_stopping,
   6585 		    0, CTL_CREATE, CTL_EOL) != 0)
   6586 			break;
   6587 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6588 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6589 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6590 		    NULL, 0, &txq->txq_sending,
   6591 		    0, CTL_CREATE, CTL_EOL) != 0)
   6592 			break;
   6593 
   6594 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6595 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6596 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6597 		    NULL, 0, &rxq->rxq_ptr,
   6598 		    0, CTL_CREATE, CTL_EOL) != 0)
   6599 			break;
   6600 	}
   6601 
   6602 #ifdef WM_DEBUG
   6603 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6604 	    CTLTYPE_INT, "debug_flags",
   6605 	    SYSCTL_DESCR(
   6606 		    "Debug flags:\n"	\
   6607 		    "\t0x01 LINK\n"	\
   6608 		    "\t0x02 TX\n"	\
   6609 		    "\t0x04 RX\n"	\
   6610 		    "\t0x08 GMII\n"	\
   6611 		    "\t0x10 MANAGE\n"	\
   6612 		    "\t0x20 NVM\n"	\
   6613 		    "\t0x40 INIT\n"	\
   6614 		    "\t0x80 LOCK"),
   6615 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6616 	if (rv != 0)
   6617 		goto teardown;
   6618 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6619 	    CTLTYPE_BOOL, "trigger_reset",
   6620 	    SYSCTL_DESCR("Trigger an interface reset"),
   6621 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6622 	if (rv != 0)
   6623 		goto teardown;
   6624 #endif
   6625 
   6626 	return;
   6627 
   6628 teardown:
   6629 	sysctl_teardown(log);
   6630 err:
   6631 	sc->sc_sysctllog = NULL;
   6632 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6633 	    __func__, rv);
   6634 }
   6635 
   6636 static void
   6637 wm_update_stats(struct wm_softc *sc)
   6638 {
   6639 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6640 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc,  sec, rlec, rxerrc,
   6641 	    cexterr;
   6642 	uint64_t total_qdrop = 0;
   6643 	int i;
   6644 
   6645 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6646 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6647 	mpc = CSR_READ(sc, WMREG_MPC);
   6648 	colc = CSR_READ(sc, WMREG_COLC);
   6649 	sec = CSR_READ(sc, WMREG_SEC);
   6650 	rlec = CSR_READ(sc, WMREG_RLEC);
   6651 
   6652 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6653 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6654 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6655 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6656 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6657 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6658 
   6659 	if (sc->sc_type >= WM_T_82543) {
   6660 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6661 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6662 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6663 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6664 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6665 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6666 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6667 		} else {
   6668 			cexterr = 0;
   6669 			/* Excessive collision + Link down */
   6670 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6671 			    CSR_READ(sc, WMREG_HTDPMC));
   6672 		}
   6673 
   6674 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6675 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6676 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6677 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6678 			    CSR_READ(sc, WMREG_TSCTFC));
   6679 		else {
   6680 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
   6681 			    CSR_READ(sc, WMREG_CBRDPC));
   6682 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6683 			    CSR_READ(sc, WMREG_CBRMPC));
   6684 		}
   6685 	} else
   6686 		algnerrc = rxerrc = cexterr = 0;
   6687 
   6688 	if (sc->sc_type >= WM_T_82542_2_1) {
   6689 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6690 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6691 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6692 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6693 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6694 	}
   6695 
   6696 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6697 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6698 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6699 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6700 
   6701 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6702 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6703 	}
   6704 
   6705 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6706 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6707 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6708 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6709 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6710 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6711 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6712 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6713 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6714 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6715 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6716 
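         	/*
         	 * The good-octets counters are 64 bits wide, split across
         	 * low/high 32-bit registers; combine the halves here.
         	 */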
   6717 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6718 	    CSR_READ(sc, WMREG_GORCL) +
   6719 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6720 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6721 	    CSR_READ(sc, WMREG_GOTCL) +
   6722 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6723 
   6724 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6725 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6726 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6727 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6728 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6729 
   6730 	if (sc->sc_type >= WM_T_82540) {
   6731 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6732 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6733 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6734 	}
   6735 
   6736 	/*
   6737 	 * The TOR(L) register includes:
   6738 	 *  - Error
   6739 	 *  - Flow control
    6740 	 *  - Broadcast rejected (This note appears in the 82574 and newer
   6741 	 *    datasheets. What does "broadcast rejected" mean?)
   6742 	 */
   6743 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6744 	    CSR_READ(sc, WMREG_TORL) +
   6745 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6746 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6747 	    CSR_READ(sc, WMREG_TOTL) +
   6748 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6749 
   6750 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6751 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6752 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6753 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6754 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6755 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6756 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6757 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6758 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6759 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6760 	if (sc->sc_type >= WM_T_82571)
   6761 		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6762 	if (sc->sc_type < WM_T_82575) {
   6763 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6764 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6765 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6766 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6767 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6768 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6769 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6770 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6771 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6772 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6773 	} else if (!WM_IS_ICHPCH(sc)) {
   6774 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6775 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6776 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6777 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6778 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6779 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6780 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6781 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6782 
   6783 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6784 		    CSR_READ(sc, WMREG_HGORCL) +
   6785 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6786 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6787 		    CSR_READ(sc, WMREG_HGOTCL) +
   6788 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6789 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6790 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6791 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6792 		for (i = 0; i < sc->sc_nqueues; i++) {
   6793 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6794 			uint32_t rqdpc;
   6795 
   6796 			rqdpc = CSR_READ(sc, WMREG_RQDPC(i));
   6797 			/*
    6798 			 * On I210 and newer devices, the RQDPC register is
    6799 			 * not cleared on read.
   6800 			 */
   6801 			if ((rqdpc != 0) && (sc->sc_type >= WM_T_I210))
   6802 				CSR_WRITE(sc, WMREG_RQDPC(i), 0);
   6803 			WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc);
   6804 			total_qdrop += rqdpc;
   6805 		}
   6806 	}
   6807 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6808 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6809 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6810 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6811 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6812 			    CSR_READ(sc, WMREG_B2OGPRC));
   6813 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6814 			    CSR_READ(sc, WMREG_O2BSPC));
   6815 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6816 			    CSR_READ(sc, WMREG_B2OSPC));
   6817 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6818 			    CSR_READ(sc, WMREG_O2BGPTC));
   6819 		}
   6820 	}
   6821 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6822 	if_statadd_ref(nsr, if_collisions, colc);
   6823 	if_statadd_ref(nsr, if_ierrors,
   6824 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6825 	/*
   6826 	 * WMREG_RNBC is incremented when there are no available buffers in
    6827 	 * host memory. It is not the number of dropped packets, because the
    6828 	 * controller can still receive packets in that case as long as there
    6829 	 * is space in the PHY's FIFO.
    6830 	 *
    6831 	 * If you want to track WMREG_RNBC, use something like a dedicated
    6832 	 * EVCNT instead of if_iqdrops.
   6833 	 */
   6834 	if_statadd_ref(nsr, if_iqdrops, mpc + total_qdrop);
   6835 	IF_STAT_PUTREF(ifp);
   6836 }
   6837 
   6838 void
   6839 wm_clear_evcnt(struct wm_softc *sc)
   6840 {
   6841 #ifdef WM_EVENT_COUNTERS
   6842 	int i;
   6843 
   6844 	/* RX queues */
   6845 	for (i = 0; i < sc->sc_nqueues; i++) {
   6846 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6847 
   6848 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6849 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6850 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6851 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6852 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
   6853 			WM_Q_EVCNT_STORE(rxq, qdrop, 0);
   6854 	}
   6855 
   6856 	/* TX queues */
   6857 	for (i = 0; i < sc->sc_nqueues; i++) {
   6858 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6859 		int j;
   6860 
   6861 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6862 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6863 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6864 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6865 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6866 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6867 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6868 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6869 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6870 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6871 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6872 
   6873 		for (j = 0; j < WM_NTXSEGS; j++)
   6874 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6875 
   6876 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6877 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6878 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6879 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6880 		if (sc->sc_type <= WM_T_82544)
   6881 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6882 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6883 	}
   6884 
   6885 	/* Miscs */
   6886 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6887 
   6888 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6889 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6890 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6891 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6892 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6893 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6894 
   6895 	if (sc->sc_type >= WM_T_82543) {
   6896 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6897 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6898 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6899 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6900 		else
   6901 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6902 
   6903 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6904 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6905 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6906 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6907 		else {
   6908 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6909 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6910 		}
   6911 	}
   6912 
   6913 	if (sc->sc_type >= WM_T_82542_2_1) {
   6914 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6915 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6916 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6917 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6918 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6919 	}
   6920 
   6921 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6922 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6923 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6924 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6925 
   6926 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6927 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6928 
   6929 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6930 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6931 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6932 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6933 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6934 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6935 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6936 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6937 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6938 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6939 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6940 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6941 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6942 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6943 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6944 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6945 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6946 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6947 	if (sc->sc_type >= WM_T_82540) {
   6948 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6949 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6950 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6951 	}
   6952 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6953 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6954 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6955 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6956 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6957 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6958 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6959 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6960 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6961 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6962 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6963 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6964 	if (sc->sc_type >= WM_T_82571)
   6965 		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6966 	if (sc->sc_type < WM_T_82575) {
   6967 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6968 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6969 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6970 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6971 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6972 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6973 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6974 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6975 	} else if (!WM_IS_ICHPCH(sc)) {
   6976 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6977 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6978 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6979 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6980 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6981 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6982 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6983 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6984 
   6985 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6986 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6987 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6988 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6989 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6990 	}
   6991 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6992 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6993 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6994 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6995 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6996 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6997 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6998 	}
   6999 #endif
   7000 }
   7001 
   7002 /*
   7003  * wm_init:		[ifnet interface function]
   7004  *
   7005  *	Initialize the interface.
   7006  */
   7007 static int
   7008 wm_init(struct ifnet *ifp)
   7009 {
   7010 	struct wm_softc *sc = ifp->if_softc;
   7011 	int ret;
   7012 
   7013 	KASSERT(IFNET_LOCKED(ifp));
   7014 
   7015 	if (sc->sc_dying)
   7016 		return ENXIO;
   7017 
   7018 	mutex_enter(sc->sc_core_lock);
   7019 	ret = wm_init_locked(ifp);
   7020 	mutex_exit(sc->sc_core_lock);
   7021 
   7022 	return ret;
   7023 }
   7024 
   7025 static int
   7026 wm_init_locked(struct ifnet *ifp)
   7027 {
   7028 	struct wm_softc *sc = ifp->if_softc;
   7029 	struct ethercom *ec = &sc->sc_ethercom;
   7030 	int i, j, trynum, error = 0;
   7031 	uint32_t reg, sfp_mask = 0;
   7032 
   7033 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7034 		device_xname(sc->sc_dev), __func__));
   7035 	KASSERT(IFNET_LOCKED(ifp));
   7036 	KASSERT(mutex_owned(sc->sc_core_lock));
   7037 
   7038 	/*
    7039 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    7040 	 * There is a small but measurable benefit to avoiding the adjustment
   7041 	 * of the descriptor so that the headers are aligned, for normal mtu,
   7042 	 * on such platforms.  One possibility is that the DMA itself is
   7043 	 * slightly more efficient if the front of the entire packet (instead
   7044 	 * of the front of the headers) is aligned.
   7045 	 *
   7046 	 * Note we must always set align_tweak to 0 if we are using
   7047 	 * jumbo frames.
   7048 	 */
   7049 #ifdef __NO_STRICT_ALIGNMENT
   7050 	sc->sc_align_tweak = 0;
   7051 #else
   7052 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   7053 		sc->sc_align_tweak = 0;
   7054 	else
   7055 		sc->sc_align_tweak = 2;
   7056 #endif /* __NO_STRICT_ALIGNMENT */
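         	/*
         	 * Example of the test above (assuming MCLBYTES == 2048): with
         	 * the standard 1500-byte MTU, 1500 + 14 + 4 = 1518 <= 2046, so
         	 * sc_align_tweak = 2, which 4-byte-aligns the IP header that
         	 * follows the 14-byte Ethernet header.
         	 */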
   7057 
   7058 	/* Cancel any pending I/O. */
   7059 	wm_stop_locked(ifp, false, false);
   7060 
   7061 	/* Update statistics before reset */
   7062 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   7063 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   7064 
   7065 	/* >= PCH_SPT hardware workaround before reset. */
   7066 	if (sc->sc_type >= WM_T_PCH_SPT)
   7067 		wm_flush_desc_rings(sc);
   7068 
   7069 	/* Reset the chip to a known state. */
   7070 	wm_reset(sc);
   7071 
   7072 	/*
   7073 	 * AMT based hardware can now take control from firmware
   7074 	 * Do this after reset.
   7075 	 */
   7076 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   7077 		wm_get_hw_control(sc);
   7078 
   7079 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   7080 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   7081 		wm_legacy_irq_quirk_spt(sc);
   7082 
   7083 	/* Init hardware bits */
   7084 	wm_initialize_hardware_bits(sc);
   7085 
   7086 	/* Reset the PHY. */
   7087 	if (sc->sc_flags & WM_F_HAS_MII)
   7088 		wm_gmii_reset(sc);
   7089 
   7090 	if (sc->sc_type >= WM_T_ICH8) {
   7091 		reg = CSR_READ(sc, WMREG_GCR);
   7092 		/*
   7093 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   7094 		 * default after reset.
   7095 		 */
   7096 		if (sc->sc_type == WM_T_ICH8)
   7097 			reg |= GCR_NO_SNOOP_ALL;
   7098 		else
   7099 			reg &= ~GCR_NO_SNOOP_ALL;
   7100 		CSR_WRITE(sc, WMREG_GCR, reg);
   7101 	}
   7102 
   7103 	/* Ungate DMA clock to avoid packet loss */
   7104 	if (sc->sc_type >= WM_T_PCH_TGP) {
   7105 		reg = CSR_READ(sc, WMREG_FFLT_DBG);
   7106 		reg |= (1 << 12);
   7107 		CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
   7108 	}
   7109 
   7110 	if ((sc->sc_type >= WM_T_ICH8)
   7111 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   7112 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   7113 
   7114 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7115 		reg |= CTRL_EXT_RO_DIS;
   7116 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7117 	}
   7118 
   7119 	/* Calculate (E)ITR value */
   7120 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7121 		/*
   7122 		 * For NEWQUEUE's EITR (except for 82575).
    7123 		 * The 82575's EITR should be set to the same throttling value
    7124 		 * as other old controllers' ITR, because the interrupt/sec calculation
   7125 		 * is the same, that is, 1,000,000,000 / (N * 256).
   7126 		 *
    7127 		 * The 82574's EITR should be set to the same throttling value as ITR.
   7128 		 *
   7129 		 * For N interrupts/sec, set this value to:
   7130 		 * 1,000,000 / N in contrast to ITR throttling value.
   7131 		 */
   7132 		sc->sc_itr_init = 450;
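         		/* By the formula above: 1,000,000 / 450 =~ 2222 ints/sec. */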
   7133 	} else if (sc->sc_type >= WM_T_82543) {
   7134 		/*
   7135 		 * Set up the interrupt throttling register (units of 256ns)
   7136 		 * Note that a footnote in Intel's documentation says this
   7137 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7138 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7139 		 * that that is also true for the 1024ns units of the other
   7140 		 * interrupt-related timer registers -- so, really, we ought
   7141 		 * to divide this value by 4 when the link speed is low.
   7142 		 *
   7143 		 * XXX implement this division at link speed change!
   7144 		 */
   7145 
   7146 		/*
   7147 		 * For N interrupts/sec, set this value to:
   7148 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7149 		 * absolute and packet timer values to this value
   7150 		 * divided by 4 to get "simple timer" behavior.
   7151 		 */
   7152 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
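         		/* i.e. 1,000,000,000 / (1500 * 256) =~ 2604. */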
   7153 	}
   7154 
   7155 	error = wm_init_txrx_queues(sc);
   7156 	if (error)
   7157 		goto out;
   7158 
   7159 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7160 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7161 	    (sc->sc_type >= WM_T_82575))
   7162 		wm_serdes_power_up_link_82575(sc);
   7163 
   7164 	/* Clear out the VLAN table -- we don't use it (yet). */
   7165 	CSR_WRITE(sc, WMREG_VET, 0);
   7166 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7167 		trynum = 10; /* Due to hw errata */
   7168 	else
   7169 		trynum = 1;
   7170 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7171 		for (j = 0; j < trynum; j++)
   7172 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7173 
   7174 	/*
   7175 	 * Set up flow-control parameters.
   7176 	 *
   7177 	 * XXX Values could probably stand some tuning.
   7178 	 */
   7179 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7180 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7181 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   7182 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
   7183 	    && (sc->sc_type != WM_T_PCH_TGP)) {
   7184 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7185 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7186 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7187 	}
   7188 
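         	/*
         	 * FCRTH/FCRTL are the receive-FIFO high/low water marks: an
         	 * XOFF pause frame is sent when the FIFO fills past the high
         	 * mark and an XON when it drains below the low one.
         	 */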
   7189 	sc->sc_fcrtl = FCRTL_DFLT;
   7190 	if (sc->sc_type < WM_T_82543) {
   7191 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7192 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7193 	} else {
   7194 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7195 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7196 	}
   7197 
   7198 	if (sc->sc_type == WM_T_80003)
   7199 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7200 	else
   7201 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7202 
   7203 	/* Writes the control register. */
   7204 	wm_set_vlan(sc);
   7205 
   7206 	if (sc->sc_flags & WM_F_HAS_MII) {
   7207 		uint16_t kmreg;
   7208 
   7209 		switch (sc->sc_type) {
   7210 		case WM_T_80003:
   7211 		case WM_T_ICH8:
   7212 		case WM_T_ICH9:
   7213 		case WM_T_ICH10:
   7214 		case WM_T_PCH:
   7215 		case WM_T_PCH2:
   7216 		case WM_T_PCH_LPT:
   7217 		case WM_T_PCH_SPT:
   7218 		case WM_T_PCH_CNP:
   7219 		case WM_T_PCH_TGP:
   7220 			/*
   7221 			 * Set the mac to wait the maximum time between each
   7222 			 * iteration and increase the max iterations when
   7223 			 * polling the phy; this fixes erroneous timeouts at
   7224 			 * 10Mbps.
   7225 			 */
   7226 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7227 			    0xFFFF);
   7228 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7229 			    &kmreg);
   7230 			kmreg |= 0x3F;
   7231 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7232 			    kmreg);
   7233 			break;
   7234 		default:
   7235 			break;
   7236 		}
   7237 
   7238 		if (sc->sc_type == WM_T_80003) {
   7239 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7240 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7241 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7242 
   7243 			/* Bypass RX and TX FIFOs */
   7244 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7245 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7246 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7247 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7248 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7249 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7250 		}
   7251 	}
   7252 #if 0
   7253 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7254 #endif
   7255 
   7256 	/* Set up checksum offload parameters. */
   7257 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7258 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7259 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7260 		reg |= RXCSUM_IPOFL;
   7261 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7262 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7263 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7264 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7265 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7266 
   7267 	/* Set registers about MSI-X */
   7268 	if (wm_is_using_msix(sc)) {
   7269 		uint32_t ivar, qintr_idx;
   7270 		struct wm_queue *wmq;
   7271 		unsigned int qid;
   7272 
   7273 		if (sc->sc_type == WM_T_82575) {
   7274 			/* Interrupt control */
   7275 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7276 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7277 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7278 
   7279 			/* TX and RX */
   7280 			for (i = 0; i < sc->sc_nqueues; i++) {
   7281 				wmq = &sc->sc_queue[i];
   7282 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7283 				    EITR_TX_QUEUE(wmq->wmq_id)
   7284 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7285 			}
   7286 			/* Link status */
   7287 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7288 			    EITR_OTHER);
   7289 		} else if (sc->sc_type == WM_T_82574) {
   7290 			/* Interrupt control */
   7291 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7292 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7293 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7294 
   7295 			/*
    7296 			 * Work around an issue with spurious interrupts in
    7297 			 * MSI-X mode.
    7298 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    7299 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   7300 			 */
   7301 			reg = CSR_READ(sc, WMREG_RFCTL);
   7302 			reg |= WMREG_RFCTL_ACKDIS;
   7303 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7304 
   7305 			ivar = 0;
   7306 			/* TX and RX */
   7307 			for (i = 0; i < sc->sc_nqueues; i++) {
   7308 				wmq = &sc->sc_queue[i];
   7309 				qid = wmq->wmq_id;
   7310 				qintr_idx = wmq->wmq_intr_idx;
   7311 
   7312 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7313 				    IVAR_TX_MASK_Q_82574(qid));
   7314 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7315 				    IVAR_RX_MASK_Q_82574(qid));
   7316 			}
   7317 			/* Link status */
   7318 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7319 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7320 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7321 		} else {
   7322 			/* Interrupt control */
   7323 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7324 			    | GPIE_EIAME | GPIE_PBA);
   7325 
   7326 			switch (sc->sc_type) {
   7327 			case WM_T_82580:
   7328 			case WM_T_I350:
   7329 			case WM_T_I354:
   7330 			case WM_T_I210:
   7331 			case WM_T_I211:
   7332 				/* TX and RX */
   7333 				for (i = 0; i < sc->sc_nqueues; i++) {
   7334 					wmq = &sc->sc_queue[i];
   7335 					qid = wmq->wmq_id;
   7336 					qintr_idx = wmq->wmq_intr_idx;
   7337 
   7338 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7339 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7340 					ivar |= __SHIFTIN((qintr_idx
   7341 						| IVAR_VALID),
   7342 					    IVAR_TX_MASK_Q(qid));
   7343 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7344 					ivar |= __SHIFTIN((qintr_idx
   7345 						| IVAR_VALID),
   7346 					    IVAR_RX_MASK_Q(qid));
   7347 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7348 				}
   7349 				break;
   7350 			case WM_T_82576:
   7351 				/* TX and RX */
   7352 				for (i = 0; i < sc->sc_nqueues; i++) {
   7353 					wmq = &sc->sc_queue[i];
   7354 					qid = wmq->wmq_id;
   7355 					qintr_idx = wmq->wmq_intr_idx;
   7356 
   7357 					ivar = CSR_READ(sc,
   7358 					    WMREG_IVAR_Q_82576(qid));
   7359 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7360 					ivar |= __SHIFTIN((qintr_idx
   7361 						| IVAR_VALID),
   7362 					    IVAR_TX_MASK_Q_82576(qid));
   7363 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7364 					ivar |= __SHIFTIN((qintr_idx
   7365 						| IVAR_VALID),
   7366 					    IVAR_RX_MASK_Q_82576(qid));
   7367 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7368 					    ivar);
   7369 				}
   7370 				break;
   7371 			default:
   7372 				break;
   7373 			}
   7374 
   7375 			/* Link status */
   7376 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7377 			    IVAR_MISC_OTHER);
   7378 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7379 		}
   7380 
   7381 		if (wm_is_using_multiqueue(sc)) {
   7382 			wm_init_rss(sc);
   7383 
    7384 			/*
    7385 			 * NOTE: Receive full-packet checksum offload is
    7386 			 * mutually exclusive with multiqueue. This is not
    7387 			 * the same as TCP/IP checksum offload, which still
    7388 			 * works.
    7389 			 */
   7390 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7391 			reg |= RXCSUM_PCSD;
   7392 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7393 		}
   7394 	}
   7395 
   7396 	/* Set up the interrupt registers. */
   7397 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7398 
   7399 	/* Enable SFP module insertion interrupt if it's required */
   7400 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7401 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7402 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7403 		sfp_mask = ICR_GPI(0);
   7404 	}
   7405 
   7406 	if (wm_is_using_msix(sc)) {
   7407 		uint32_t mask;
   7408 		struct wm_queue *wmq;
   7409 
   7410 		switch (sc->sc_type) {
   7411 		case WM_T_82574:
   7412 			mask = 0;
   7413 			for (i = 0; i < sc->sc_nqueues; i++) {
   7414 				wmq = &sc->sc_queue[i];
   7415 				mask |= ICR_TXQ(wmq->wmq_id);
   7416 				mask |= ICR_RXQ(wmq->wmq_id);
   7417 			}
   7418 			mask |= ICR_OTHER;
   7419 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7420 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7421 			break;
   7422 		default:
   7423 			if (sc->sc_type == WM_T_82575) {
   7424 				mask = 0;
   7425 				for (i = 0; i < sc->sc_nqueues; i++) {
   7426 					wmq = &sc->sc_queue[i];
   7427 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7428 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7429 				}
   7430 				mask |= EITR_OTHER;
   7431 			} else {
   7432 				mask = 0;
   7433 				for (i = 0; i < sc->sc_nqueues; i++) {
   7434 					wmq = &sc->sc_queue[i];
   7435 					mask |= 1 << wmq->wmq_intr_idx;
   7436 				}
   7437 				mask |= 1 << sc->sc_link_intr_idx;
   7438 			}
   7439 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7440 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7441 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7442 
   7443 			/* For other interrupts */
   7444 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7445 			break;
   7446 		}
   7447 	} else {
   7448 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7449 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7450 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7451 	}
   7452 
   7453 	/* Set up the inter-packet gap. */
   7454 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7455 
   7456 	if (sc->sc_type >= WM_T_82543) {
   7457 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7458 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7459 			wm_itrs_writereg(sc, wmq);
   7460 		}
   7461 		/*
    7462 		 * Link interrupts occur much less frequently than TX and
    7463 		 * RX interrupts, so we don't tune the
    7464 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    7465 		 * if_igb does.
   7466 		 */
   7467 	}
   7468 
   7469 	/* Set the VLAN EtherType. */
   7470 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7471 
   7472 	/*
   7473 	 * Set up the transmit control register; we start out with
   7474 	 * a collision distance suitable for FDX, but update it when
   7475 	 * we resolve the media type.
   7476 	 */
   7477 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7478 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7479 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7480 	if (sc->sc_type >= WM_T_82571)
   7481 		sc->sc_tctl |= TCTL_MULR;
   7482 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7483 
   7484 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7485 		/* Write TDT after TCTL.EN is set; see the documentation. */
   7486 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7487 	}
   7488 
   7489 	if (sc->sc_type == WM_T_80003) {
   7490 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7491 		reg &= ~TCTL_EXT_GCEX_MASK;
   7492 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7493 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7494 	}
   7495 
   7496 	/* Set the media. */
   7497 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7498 		goto out;
   7499 
   7500 	/* Configure for OS presence */
   7501 	wm_init_manageability(sc);
   7502 
   7503 	/*
   7504 	 * Set up the receive control register; we actually program the
   7505 	 * register when we set the receive filter. Use multicast address
   7506 	 * offset type 0.
   7507 	 *
   7508 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7509 	 * don't enable that feature.
   7510 	 */
   7511 	sc->sc_mchash_type = 0;
   7512 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7513 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7514 
    7515 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   7516 	if (sc->sc_type == WM_T_82574)
   7517 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7518 
   7519 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7520 		sc->sc_rctl |= RCTL_SECRC;
   7521 
   7522 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7523 	    && (ifp->if_mtu > ETHERMTU)) {
   7524 		sc->sc_rctl |= RCTL_LPE;
   7525 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7526 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7527 	}
   7528 
   7529 	if (MCLBYTES == 2048)
   7530 		sc->sc_rctl |= RCTL_2k;
   7531 	else {
   7532 		if (sc->sc_type >= WM_T_82543) {
   7533 			switch (MCLBYTES) {
   7534 			case 4096:
   7535 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7536 				break;
   7537 			case 8192:
   7538 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7539 				break;
   7540 			case 16384:
   7541 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7542 				break;
   7543 			default:
   7544 				panic("wm_init: MCLBYTES %d unsupported",
   7545 				    MCLBYTES);
   7546 				break;
   7547 			}
   7548 		} else
   7549 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7550 	}
   7551 
   7552 	/* Enable ECC */
   7553 	switch (sc->sc_type) {
   7554 	case WM_T_82571:
   7555 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7556 		reg |= PBA_ECC_CORR_EN;
   7557 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7558 		break;
   7559 	case WM_T_PCH_LPT:
   7560 	case WM_T_PCH_SPT:
   7561 	case WM_T_PCH_CNP:
   7562 	case WM_T_PCH_TGP:
   7563 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7564 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7565 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7566 
   7567 		sc->sc_ctrl |= CTRL_MEHE;
   7568 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7569 		break;
   7570 	default:
   7571 		break;
   7572 	}
   7573 
   7574 	/*
   7575 	 * Set the receive filter.
   7576 	 *
   7577 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7578 	 * the setting of RCTL.EN in wm_set_filter()
   7579 	 */
   7580 	wm_set_filter(sc);
   7581 
    7582 	/* On 82575 and later, set RDT only once RX is enabled. */
   7583 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7584 		int qidx;
   7585 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7586 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7587 			for (i = 0; i < WM_NRXDESC; i++) {
   7588 				mutex_enter(rxq->rxq_lock);
   7589 				wm_init_rxdesc(rxq, i);
   7590 				mutex_exit(rxq->rxq_lock);
    7592 			}
   7593 		}
   7594 	}
   7595 
   7596 	wm_unset_stopping_flags(sc);
   7597 
   7598 	/* Start the one second link check clock. */
   7599 	callout_schedule(&sc->sc_tick_ch, hz);
   7600 
   7601 	/*
   7602 	 * ...all done! (IFNET_LOCKED asserted above.)
   7603 	 */
   7604 	ifp->if_flags |= IFF_RUNNING;
   7605 
   7606 out:
   7607 	/* Save last flags for the callback */
   7608 	sc->sc_if_flags = ifp->if_flags;
   7609 	sc->sc_ec_capenable = ec->ec_capenable;
   7610 	if (error)
   7611 		log(LOG_ERR, "%s: interface not running\n",
   7612 		    device_xname(sc->sc_dev));
   7613 	return error;
   7614 }
   7615 
   7616 /*
   7617  * wm_stop:		[ifnet interface function]
   7618  *
   7619  *	Stop transmission on the interface.
   7620  */
   7621 static void
   7622 wm_stop(struct ifnet *ifp, int disable)
   7623 {
   7624 	struct wm_softc *sc = ifp->if_softc;
   7625 
   7626 	ASSERT_SLEEPABLE();
   7627 	KASSERT(IFNET_LOCKED(ifp));
   7628 
   7629 	mutex_enter(sc->sc_core_lock);
    7630 	wm_stop_locked(ifp, disable != 0, true);
   7631 	mutex_exit(sc->sc_core_lock);
   7632 
   7633 	/*
   7634 	 * After wm_set_stopping_flags(), it is guaranteed that
   7635 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7636 	 * However, workqueue_wait() cannot be called from
    7637 	 * wm_stop_locked() because it can sleep, so call it here
    7638 	 * instead.
   7639 	 */
   7640 	for (int i = 0; i < sc->sc_nqueues; i++)
   7641 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7642 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7643 }
   7644 
   7645 static void
   7646 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7647 {
   7648 	struct wm_softc *sc = ifp->if_softc;
   7649 	struct wm_txsoft *txs;
   7650 	int i, qidx;
   7651 
   7652 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7653 		device_xname(sc->sc_dev), __func__));
   7654 	KASSERT(IFNET_LOCKED(ifp));
   7655 	KASSERT(mutex_owned(sc->sc_core_lock));
   7656 
   7657 	wm_set_stopping_flags(sc);
   7658 
   7659 	if (sc->sc_flags & WM_F_HAS_MII) {
   7660 		/* Down the MII. */
   7661 		mii_down(&sc->sc_mii);
   7662 	} else {
   7663 #if 0
   7664 		/* Should we clear PHY's status properly? */
   7665 		wm_reset(sc);
   7666 #endif
   7667 	}
   7668 
   7669 	/* Stop the transmit and receive processes. */
   7670 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7671 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7672 	sc->sc_rctl &= ~RCTL_EN;
   7673 
   7674 	/*
   7675 	 * Clear the interrupt mask to ensure the device cannot assert its
   7676 	 * interrupt line.
   7677 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7678 	 * service any currently pending or shared interrupt.
   7679 	 */
   7680 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7681 	sc->sc_icr = 0;
   7682 	if (wm_is_using_msix(sc)) {
   7683 		if (sc->sc_type != WM_T_82574) {
   7684 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7685 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7686 		} else
   7687 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7688 	}
   7689 
   7690 	/*
   7691 	 * Stop callouts after interrupts are disabled; if we have
   7692 	 * to wait for them, we will be releasing the CORE_LOCK
   7693 	 * briefly, which will unblock interrupts on the current CPU.
   7694 	 */
   7695 
   7696 	/* Stop the one second clock. */
   7697 	if (wait)
   7698 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7699 	else
   7700 		callout_stop(&sc->sc_tick_ch);
   7701 
   7702 	/* Stop the 82547 Tx FIFO stall check timer. */
   7703 	if (sc->sc_type == WM_T_82547) {
   7704 		if (wait)
   7705 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7706 		else
   7707 			callout_stop(&sc->sc_txfifo_ch);
   7708 	}
   7709 
   7710 	/* Release any queued transmit buffers. */
   7711 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7712 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7713 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7714 		struct mbuf *m;
   7715 
   7716 		mutex_enter(txq->txq_lock);
   7717 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7718 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7719 			txs = &txq->txq_soft[i];
   7720 			if (txs->txs_mbuf != NULL) {
    7721 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7722 				m_freem(txs->txs_mbuf);
   7723 				txs->txs_mbuf = NULL;
   7724 			}
   7725 		}
   7726 		/* Drain txq_interq */
   7727 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7728 			m_freem(m);
   7729 		mutex_exit(txq->txq_lock);
   7730 	}
   7731 
   7732 	/* Mark the interface as down and cancel the watchdog timer. */
   7733 	ifp->if_flags &= ~IFF_RUNNING;
   7734 	sc->sc_if_flags = ifp->if_flags;
   7735 
   7736 	if (disable) {
   7737 		for (i = 0; i < sc->sc_nqueues; i++) {
   7738 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7739 			mutex_enter(rxq->rxq_lock);
   7740 			wm_rxdrain(rxq);
   7741 			mutex_exit(rxq->rxq_lock);
   7742 		}
   7743 	}
   7744 
   7745 #if 0 /* notyet */
   7746 	if (sc->sc_type >= WM_T_82544)
   7747 		CSR_WRITE(sc, WMREG_WUC, 0);
   7748 #endif
   7749 }
   7750 
   7751 static void
   7752 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7753 {
   7754 	struct mbuf *m;
   7755 	int i;
   7756 
   7757 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7758 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7759 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7760 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7761 		    m->m_data, m->m_len, m->m_flags);
   7762 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7763 	    i, i == 1 ? "" : "s");
   7764 }
   7765 
   7766 /*
   7767  * wm_82547_txfifo_stall:
   7768  *
   7769  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7770  *	reset the FIFO pointers, and restart packet transmission.
   7771  */
   7772 static void
   7773 wm_82547_txfifo_stall(void *arg)
   7774 {
   7775 	struct wm_softc *sc = arg;
   7776 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7777 
   7778 	mutex_enter(txq->txq_lock);
   7779 
   7780 	if (txq->txq_stopping)
   7781 		goto out;
   7782 
   7783 	if (txq->txq_fifo_stall) {
   7784 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7785 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7786 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7787 			/*
   7788 			 * Packets have drained.  Stop transmitter, reset
   7789 			 * FIFO pointers, restart transmitter, and kick
   7790 			 * the packet queue.
   7791 			 */
   7792 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7793 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7794 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7795 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7796 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7797 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7798 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7799 			CSR_WRITE_FLUSH(sc);
   7800 
   7801 			txq->txq_fifo_head = 0;
   7802 			txq->txq_fifo_stall = 0;
   7803 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7804 		} else {
   7805 			/*
   7806 			 * Still waiting for packets to drain; try again in
   7807 			 * another tick.
   7808 			 */
   7809 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7810 		}
   7811 	}
   7812 
   7813 out:
   7814 	mutex_exit(txq->txq_lock);
   7815 }
   7816 
   7817 /*
   7818  * wm_82547_txfifo_bugchk:
   7819  *
   7820  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7821  *	prevent enqueueing a packet that would wrap around the end
    7822  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7823  *
   7824  *	We do this by checking the amount of space before the end
   7825  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7826  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7827  *	the internal FIFO pointers to the beginning, and restart
   7828  *	transmission on the interface.
   7829  */
   7830 #define	WM_FIFO_HDR		0x10
   7831 #define	WM_82547_PAD_LEN	0x3e0
   7832 static int
   7833 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7834 {
   7835 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7836 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7837 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7838 
   7839 	/* Just return if already stalled. */
   7840 	if (txq->txq_fifo_stall)
   7841 		return 1;
   7842 
   7843 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7844 		/* Stall only occurs in half-duplex mode. */
   7845 		goto send_packet;
   7846 	}
   7847 
   7848 	if (len >= WM_82547_PAD_LEN + space) {
   7849 		txq->txq_fifo_stall = 1;
   7850 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7851 		return 1;
   7852 	}
   7853 
   7854 send_packet:
   7855 	txq->txq_fifo_head += len;
   7856 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7857 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7858 
   7859 	return 0;
   7860 }
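
/*
 * Illustrative sketch (not part of the driver): the FIFO space accounting
 * used by wm_82547_txfifo_bugchk() in standalone form, assuming an 8 KB
 * FIFO for the example.  With WM_FIFO_HDR = 0x10, a 1514-byte frame
 * consumes roundup(1514 + 16, 16) = 1536 bytes of FIFO space, and the
 * head pointer wraps modulo the FIFO size.
 */
#if 0
#include <assert.h>

#define FIFO_HDR	0x10
#define ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

static int
fifo_advance(int head, int fifo_size, int pktlen)
{

	head += ROUNDUP(pktlen + FIFO_HDR, FIFO_HDR);
	if (head >= fifo_size)
		head -= fifo_size;
	return head;
}

int
main(void)
{

	/* A full-sized Ethernet frame takes 1536 bytes of FIFO space. */
	assert(fifo_advance(0, 8192, 1514) == 1536);
	/* Near the end of the FIFO the head wraps back around. */
	assert(fifo_advance(8000, 8192, 1514) == 1344);
	return 0;
}
#endif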
   7861 
   7862 static int
   7863 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7864 {
   7865 	int error;
   7866 
   7867 	/*
   7868 	 * Allocate the control data structures, and create and load the
   7869 	 * DMA map for it.
   7870 	 *
   7871 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7872 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7873 	 * both sets within the same 4G segment.
   7874 	 */
   7875 	if (sc->sc_type < WM_T_82544)
   7876 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7877 	else
   7878 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7879 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7880 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7881 	else
   7882 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7883 
   7884 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7885 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7886 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7887 		aprint_error_dev(sc->sc_dev,
   7888 		    "unable to allocate TX control data, error = %d\n",
   7889 		    error);
   7890 		goto fail_0;
   7891 	}
   7892 
   7893 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7894 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7895 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7896 		aprint_error_dev(sc->sc_dev,
   7897 		    "unable to map TX control data, error = %d\n", error);
   7898 		goto fail_1;
   7899 	}
   7900 
   7901 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7902 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7903 		aprint_error_dev(sc->sc_dev,
   7904 		    "unable to create TX control data DMA map, error = %d\n",
   7905 		    error);
   7906 		goto fail_2;
   7907 	}
   7908 
   7909 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7910 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7911 		aprint_error_dev(sc->sc_dev,
   7912 		    "unable to load TX control data DMA map, error = %d\n",
   7913 		    error);
   7914 		goto fail_3;
   7915 	}
   7916 
   7917 	return 0;
   7918 
   7919 fail_3:
   7920 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7921 fail_2:
   7922 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7923 	    WM_TXDESCS_SIZE(txq));
   7924 fail_1:
   7925 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7926 fail_0:
   7927 	return error;
   7928 }
   7929 
   7930 static void
   7931 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7932 {
   7933 
   7934 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7935 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7936 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7937 	    WM_TXDESCS_SIZE(txq));
   7938 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7939 }
   7940 
   7941 static int
   7942 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7943 {
   7944 	int error;
   7945 	size_t rxq_descs_size;
   7946 
   7947 	/*
   7948 	 * Allocate the control data structures, and create and load the
   7949 	 * DMA map for it.
   7950 	 *
   7951 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7952 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7953 	 * both sets within the same 4G segment.
   7954 	 */
   7955 	rxq->rxq_ndesc = WM_NRXDESC;
   7956 	if (sc->sc_type == WM_T_82574)
   7957 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7958 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7959 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7960 	else
   7961 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7962 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7963 
   7964 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7965 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7966 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7967 		aprint_error_dev(sc->sc_dev,
   7968 		    "unable to allocate RX control data, error = %d\n",
   7969 		    error);
   7970 		goto fail_0;
   7971 	}
   7972 
   7973 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7974 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7975 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7976 		aprint_error_dev(sc->sc_dev,
   7977 		    "unable to map RX control data, error = %d\n", error);
   7978 		goto fail_1;
   7979 	}
   7980 
   7981 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7982 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7983 		aprint_error_dev(sc->sc_dev,
   7984 		    "unable to create RX control data DMA map, error = %d\n",
   7985 		    error);
   7986 		goto fail_2;
   7987 	}
   7988 
   7989 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7990 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7991 		aprint_error_dev(sc->sc_dev,
   7992 		    "unable to load RX control data DMA map, error = %d\n",
   7993 		    error);
   7994 		goto fail_3;
   7995 	}
   7996 
   7997 	return 0;
   7998 
   7999  fail_3:
   8000 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   8001  fail_2:
   8002 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   8003 	    rxq_descs_size);
   8004  fail_1:
   8005 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   8006  fail_0:
   8007 	return error;
   8008 }
   8009 
   8010 static void
   8011 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8012 {
   8013 
   8014 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   8015 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   8016 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   8017 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   8018 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   8019 }
   8020 
   8021 
   8022 static int
   8023 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   8024 {
   8025 	int i, error;
   8026 
   8027 	/* Create the transmit buffer DMA maps. */
   8028 	WM_TXQUEUELEN(txq) =
   8029 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   8030 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   8031 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8032 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   8033 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   8034 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   8035 			aprint_error_dev(sc->sc_dev,
   8036 			    "unable to create Tx DMA map %d, error = %d\n",
   8037 			    i, error);
   8038 			goto fail;
   8039 		}
   8040 	}
   8041 
   8042 	return 0;
   8043 
   8044 fail:
   8045 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8046 		if (txq->txq_soft[i].txs_dmamap != NULL)
   8047 			bus_dmamap_destroy(sc->sc_dmat,
   8048 			    txq->txq_soft[i].txs_dmamap);
   8049 	}
   8050 	return error;
   8051 }
   8052 
   8053 static void
   8054 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   8055 {
   8056 	int i;
   8057 
   8058 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8059 		if (txq->txq_soft[i].txs_dmamap != NULL)
   8060 			bus_dmamap_destroy(sc->sc_dmat,
   8061 			    txq->txq_soft[i].txs_dmamap);
   8062 	}
   8063 }
   8064 
   8065 static int
   8066 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8067 {
   8068 	int i, error;
   8069 
   8070 	/* Create the receive buffer DMA maps. */
   8071 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8072 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   8073 			    MCLBYTES, 0, 0,
   8074 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   8075 			aprint_error_dev(sc->sc_dev,
   8076 			    "unable to create Rx DMA map %d error = %d\n",
   8077 			    i, error);
   8078 			goto fail;
   8079 		}
   8080 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   8081 	}
   8082 
   8083 	return 0;
   8084 
   8085  fail:
   8086 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8087 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8088 			bus_dmamap_destroy(sc->sc_dmat,
   8089 			    rxq->rxq_soft[i].rxs_dmamap);
   8090 	}
   8091 	return error;
   8092 }
   8093 
   8094 static void
   8095 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8096 {
   8097 	int i;
   8098 
   8099 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8100 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8101 			bus_dmamap_destroy(sc->sc_dmat,
   8102 			    rxq->rxq_soft[i].rxs_dmamap);
   8103 	}
   8104 }
   8105 
   8106 /*
    8107  * wm_alloc_txrx_queues:
    8108  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   8109  */
   8110 static int
   8111 wm_alloc_txrx_queues(struct wm_softc *sc)
   8112 {
   8113 	int i, error, tx_done, rx_done;
   8114 
   8115 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   8116 	    KM_SLEEP);
   8117 	if (sc->sc_queue == NULL) {
    8118 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   8119 		error = ENOMEM;
   8120 		goto fail_0;
   8121 	}
   8122 
   8123 	/* For transmission */
   8124 	error = 0;
   8125 	tx_done = 0;
   8126 	for (i = 0; i < sc->sc_nqueues; i++) {
   8127 #ifdef WM_EVENT_COUNTERS
   8128 		int j;
   8129 		const char *xname;
   8130 #endif
   8131 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8132 		txq->txq_sc = sc;
   8133 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8134 
   8135 		error = wm_alloc_tx_descs(sc, txq);
   8136 		if (error)
   8137 			break;
   8138 		error = wm_alloc_tx_buffer(sc, txq);
   8139 		if (error) {
   8140 			wm_free_tx_descs(sc, txq);
   8141 			break;
   8142 		}
   8143 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8144 		if (txq->txq_interq == NULL) {
   8145 			wm_free_tx_descs(sc, txq);
   8146 			wm_free_tx_buffer(sc, txq);
   8147 			error = ENOMEM;
   8148 			break;
   8149 		}
   8150 
   8151 #ifdef WM_EVENT_COUNTERS
   8152 		xname = device_xname(sc->sc_dev);
   8153 
   8154 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8155 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8156 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8157 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8158 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8159 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8160 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8161 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8162 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8163 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8164 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8165 
   8166 		for (j = 0; j < WM_NTXSEGS; j++) {
   8167 			snprintf(txq->txq_txseg_evcnt_names[j],
   8168 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8169 			    "txq%02dtxseg%d", i, j);
   8170 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8171 			    EVCNT_TYPE_MISC,
   8172 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8173 		}
   8174 
   8175 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8176 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8177 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8178 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8179 		/* Only for 82544 (and earlier?) */
   8180 		if (sc->sc_type <= WM_T_82544)
   8181 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8182 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8183 #endif /* WM_EVENT_COUNTERS */
   8184 
   8185 		tx_done++;
   8186 	}
   8187 	if (error)
   8188 		goto fail_1;
   8189 
   8190 	/* For receive */
   8191 	error = 0;
   8192 	rx_done = 0;
   8193 	for (i = 0; i < sc->sc_nqueues; i++) {
   8194 #ifdef WM_EVENT_COUNTERS
   8195 		const char *xname;
   8196 #endif
   8197 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8198 		rxq->rxq_sc = sc;
   8199 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8200 
   8201 		error = wm_alloc_rx_descs(sc, rxq);
   8202 		if (error)
   8203 			break;
   8204 
   8205 		error = wm_alloc_rx_buffer(sc, rxq);
   8206 		if (error) {
   8207 			wm_free_rx_descs(sc, rxq);
   8208 			break;
   8209 		}
   8210 
   8211 #ifdef WM_EVENT_COUNTERS
   8212 		xname = device_xname(sc->sc_dev);
   8213 
   8214 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8215 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8216 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8217 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8218 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
   8219 			WM_Q_MISC_EVCNT_ATTACH(rxq, qdrop, rxq, i, xname);
   8220 #endif /* WM_EVENT_COUNTERS */
   8221 
   8222 		rx_done++;
   8223 	}
   8224 	if (error)
   8225 		goto fail_2;
   8226 
   8227 	return 0;
   8228 
   8229 fail_2:
   8230 	for (i = 0; i < rx_done; i++) {
   8231 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8232 		wm_free_rx_buffer(sc, rxq);
   8233 		wm_free_rx_descs(sc, rxq);
   8234 		if (rxq->rxq_lock)
   8235 			mutex_obj_free(rxq->rxq_lock);
   8236 	}
   8237 fail_1:
   8238 	for (i = 0; i < tx_done; i++) {
   8239 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8240 		pcq_destroy(txq->txq_interq);
   8241 		wm_free_tx_buffer(sc, txq);
   8242 		wm_free_tx_descs(sc, txq);
   8243 		if (txq->txq_lock)
   8244 			mutex_obj_free(txq->txq_lock);
   8245 	}
   8246 
   8247 	kmem_free(sc->sc_queue,
   8248 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8249 fail_0:
   8250 	return error;
   8251 }
   8252 
   8253 /*
    8254  * wm_free_txrx_queues:
    8255  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   8256  */
   8257 static void
   8258 wm_free_txrx_queues(struct wm_softc *sc)
   8259 {
   8260 	int i;
   8261 
   8262 	for (i = 0; i < sc->sc_nqueues; i++) {
   8263 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8264 
   8265 #ifdef WM_EVENT_COUNTERS
   8266 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8267 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8268 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8269 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8270 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
   8271 			WM_Q_EVCNT_DETACH(rxq, qdrop, rxq, i);
   8272 #endif /* WM_EVENT_COUNTERS */
   8273 
   8274 		wm_free_rx_buffer(sc, rxq);
   8275 		wm_free_rx_descs(sc, rxq);
   8276 		if (rxq->rxq_lock)
   8277 			mutex_obj_free(rxq->rxq_lock);
   8278 	}
   8279 
   8280 	for (i = 0; i < sc->sc_nqueues; i++) {
   8281 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8282 		struct mbuf *m;
   8283 #ifdef WM_EVENT_COUNTERS
   8284 		int j;
   8285 
   8286 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8287 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8288 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8289 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8290 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8291 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8292 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8293 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8294 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8295 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8296 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8297 
   8298 		for (j = 0; j < WM_NTXSEGS; j++)
   8299 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8300 
   8301 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8302 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8303 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8304 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8305 		if (sc->sc_type <= WM_T_82544)
   8306 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8307 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8308 #endif /* WM_EVENT_COUNTERS */
   8309 
   8310 		/* Drain txq_interq */
   8311 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8312 			m_freem(m);
   8313 		pcq_destroy(txq->txq_interq);
   8314 
   8315 		wm_free_tx_buffer(sc, txq);
   8316 		wm_free_tx_descs(sc, txq);
   8317 		if (txq->txq_lock)
   8318 			mutex_obj_free(txq->txq_lock);
   8319 	}
   8320 
   8321 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8322 }
   8323 
   8324 static void
   8325 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8326 {
   8327 
   8328 	KASSERT(mutex_owned(txq->txq_lock));
   8329 
   8330 	/* Initialize the transmit descriptor ring. */
   8331 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8332 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8333 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8334 	txq->txq_free = WM_NTXDESC(txq);
   8335 	txq->txq_next = 0;
   8336 }
   8337 
   8338 static void
   8339 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8340     struct wm_txqueue *txq)
   8341 {
   8342 
   8343 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8344 		device_xname(sc->sc_dev), __func__));
   8345 	KASSERT(mutex_owned(txq->txq_lock));
   8346 
   8347 	if (sc->sc_type < WM_T_82543) {
   8348 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8349 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8350 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8351 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8352 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8353 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8354 	} else {
   8355 		int qid = wmq->wmq_id;
   8356 
   8357 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8358 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8359 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8360 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8361 
   8362 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8363 			/*
   8364 			 * Don't write TDT before TCTL.EN is set.
    8365 			 * See the documentation.
   8366 			 */
   8367 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8368 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8369 			    | TXDCTL_WTHRESH(0));
   8370 		else {
   8371 			/* XXX should update with AIM? */
   8372 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8373 			if (sc->sc_type >= WM_T_82540) {
   8374 				/* Should be the same */
   8375 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8376 			}
   8377 
   8378 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8379 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8380 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8381 		}
   8382 	}
   8383 }
   8384 
   8385 static void
   8386 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8387 {
   8388 	int i;
   8389 
   8390 	KASSERT(mutex_owned(txq->txq_lock));
   8391 
   8392 	/* Initialize the transmit job descriptors. */
   8393 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8394 		txq->txq_soft[i].txs_mbuf = NULL;
   8395 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8396 	txq->txq_snext = 0;
   8397 	txq->txq_sdirty = 0;
   8398 }
   8399 
   8400 static void
   8401 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8402     struct wm_txqueue *txq)
   8403 {
   8404 
   8405 	KASSERT(mutex_owned(txq->txq_lock));
   8406 
   8407 	/*
   8408 	 * Set up some register offsets that are different between
   8409 	 * the i82542 and the i82543 and later chips.
   8410 	 */
   8411 	if (sc->sc_type < WM_T_82543)
   8412 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8413 	else
   8414 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8415 
   8416 	wm_init_tx_descs(sc, txq);
   8417 	wm_init_tx_regs(sc, wmq, txq);
   8418 	wm_init_tx_buffer(sc, txq);
   8419 
    8420 	/* Clear all flags other than WM_TXQ_LINKDOWN_DISCARD. */
   8421 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8422 
   8423 	txq->txq_sending = false;
   8424 }
   8425 
   8426 static void
   8427 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8428     struct wm_rxqueue *rxq)
   8429 {
   8430 
   8431 	KASSERT(mutex_owned(rxq->rxq_lock));
   8432 
   8433 	/*
   8434 	 * Initialize the receive descriptor and receive job
   8435 	 * descriptor rings.
   8436 	 */
   8437 	if (sc->sc_type < WM_T_82543) {
   8438 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8439 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8440 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8441 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8442 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8443 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8444 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8445 
   8446 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8447 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8448 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8449 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8450 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8451 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8452 	} else {
   8453 		int qid = wmq->wmq_id;
   8454 
   8455 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8456 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8457 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8458 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8459 
   8460 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8461 			uint32_t srrctl;
   8462 
   8463 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8464 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8465 				    "or higher\n", __func__, MCLBYTES);
   8466 
   8467 			/*
    8468 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
    8469 			 * supported.
   8470 			 */
   8471 			srrctl = SRRCTL_DESCTYPE_ADV_ONEBUF
   8472 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT);
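			/*
			 * Illustrative example: assuming the usual 1 KB
			 * BSIZEPKT granularity (SRRCTL_BSIZEPKT_SHIFT = 10),
			 * MCLBYTES = 2048 gives a buffer size field of
			 * 2048 >> 10 = 2, i.e. two 1 KB units.
			 */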
   8473 			/*
   8474 			 * Drop frames if the RX descriptor ring has no room.
   8475 			 * This is enabled only on multiqueue system to avoid
    8476 			 * This is enabled only on multiqueue systems to
    8477 			 * avoid adversely affecting the other queues.
   8478 			if (sc->sc_nqueues > 1)
   8479 				srrctl |= SRRCTL_DROP_EN;
   8480 			CSR_WRITE(sc, WMREG_SRRCTL(qid), srrctl);
   8481 
   8482 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8483 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8484 			    | RXDCTL_WTHRESH(1));
   8485 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8486 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8487 		} else {
   8488 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8489 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8490 			/* XXX should update with AIM? */
   8491 			CSR_WRITE(sc, WMREG_RDTR,
   8492 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   8493 			/* MUST be same */
   8494 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8495 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8496 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8497 		}
   8498 	}
   8499 }
   8500 
   8501 static int
   8502 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8503 {
   8504 	struct wm_rxsoft *rxs;
   8505 	int error, i;
   8506 
   8507 	KASSERT(mutex_owned(rxq->rxq_lock));
   8508 
   8509 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8510 		rxs = &rxq->rxq_soft[i];
   8511 		if (rxs->rxs_mbuf == NULL) {
   8512 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8513 				log(LOG_ERR, "%s: unable to allocate or map "
   8514 				    "rx buffer %d, error = %d\n",
   8515 				    device_xname(sc->sc_dev), i, error);
   8516 				/*
   8517 				 * XXX Should attempt to run with fewer receive
   8518 				 * XXX buffers instead of just failing.
   8519 				 */
   8520 				wm_rxdrain(rxq);
   8521 				return ENOMEM;
   8522 			}
   8523 		} else {
   8524 			/*
   8525 			 * For 82575 and 82576, the RX descriptors must be
   8526 			 * initialized after the setting of RCTL.EN in
   8527 			 * wm_set_filter()
   8528 			 */
   8529 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8530 				wm_init_rxdesc(rxq, i);
   8531 		}
   8532 	}
   8533 	rxq->rxq_ptr = 0;
   8534 	rxq->rxq_discard = 0;
   8535 	WM_RXCHAIN_RESET(rxq);
   8536 
   8537 	return 0;
   8538 }
   8539 
   8540 static int
   8541 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8542     struct wm_rxqueue *rxq)
   8543 {
   8544 
   8545 	KASSERT(mutex_owned(rxq->rxq_lock));
   8546 
   8547 	/*
   8548 	 * Set up some register offsets that are different between
   8549 	 * the i82542 and the i82543 and later chips.
   8550 	 */
   8551 	if (sc->sc_type < WM_T_82543)
   8552 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8553 	else
   8554 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8555 
   8556 	wm_init_rx_regs(sc, wmq, rxq);
   8557 	return wm_init_rx_buffer(sc, rxq);
   8558 }
   8559 
   8560 /*
    8561  * wm_init_txrx_queues:
    8562  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   8563  */
   8564 static int
   8565 wm_init_txrx_queues(struct wm_softc *sc)
   8566 {
   8567 	int i, error = 0;
   8568 
   8569 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8570 		device_xname(sc->sc_dev), __func__));
   8571 
   8572 	for (i = 0; i < sc->sc_nqueues; i++) {
   8573 		struct wm_queue *wmq = &sc->sc_queue[i];
   8574 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8575 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8576 
   8577 		/*
   8578 		 * TODO
    8579 		 * Currently, a constant value is used instead of AIM
    8580 		 * (adaptive interrupt moderation).  Furthermore, the
    8581 		 * interrupt interval of multiqueue (polling mode) is
    8582 		 * shorter than the default.  More tuning, and AIM, are required.
   8583 		 */
   8584 		if (wm_is_using_multiqueue(sc))
   8585 			wmq->wmq_itr = 50;
   8586 		else
   8587 			wmq->wmq_itr = sc->sc_itr_init;
   8588 		wmq->wmq_set_itr = true;
   8589 
   8590 		mutex_enter(txq->txq_lock);
   8591 		wm_init_tx_queue(sc, wmq, txq);
   8592 		mutex_exit(txq->txq_lock);
   8593 
   8594 		mutex_enter(rxq->rxq_lock);
   8595 		error = wm_init_rx_queue(sc, wmq, rxq);
   8596 		mutex_exit(rxq->rxq_lock);
   8597 		if (error)
   8598 			break;
   8599 	}
   8600 
   8601 	return error;
   8602 }
   8603 
   8604 /*
   8605  * wm_tx_offload:
   8606  *
   8607  *	Set up TCP/IP checksumming parameters for the
   8608  *	specified packet.
   8609  */
   8610 static void
   8611 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8612     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8613 {
   8614 	struct mbuf *m0 = txs->txs_mbuf;
   8615 	struct livengood_tcpip_ctxdesc *t;
   8616 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8617 	uint32_t ipcse;
   8618 	struct ether_header *eh;
   8619 	int offset, iphl;
   8620 	uint8_t fields;
   8621 
   8622 	/*
   8623 	 * XXX It would be nice if the mbuf pkthdr had offset
   8624 	 * fields for the protocol headers.
   8625 	 */
   8626 
   8627 	eh = mtod(m0, struct ether_header *);
    8628 	switch (ntohs(eh->ether_type)) {
   8629 	case ETHERTYPE_IP:
   8630 	case ETHERTYPE_IPV6:
   8631 		offset = ETHER_HDR_LEN;
   8632 		break;
   8633 
   8634 	case ETHERTYPE_VLAN:
   8635 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8636 		break;
   8637 
   8638 	default:
   8639 		/* Don't support this protocol or encapsulation. */
   8640 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8641 		txq->txq_last_hw_ipcs = 0;
   8642 		txq->txq_last_hw_tucs = 0;
   8643 		*fieldsp = 0;
   8644 		*cmdp = 0;
   8645 		return;
   8646 	}
   8647 
   8648 	if ((m0->m_pkthdr.csum_flags &
   8649 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8650 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8651 	} else
   8652 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8653 
   8654 	ipcse = offset + iphl - 1;
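	/*
	 * Worked example (illustrative): for an untagged IPv4 frame with a
	 * 20-byte IP header, offset = ETHER_HDR_LEN = 14 and iphl = 20, so
	 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte of the IP
	 * header.
	 */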
   8655 
   8656 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8657 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8658 	seg = 0;
   8659 	fields = 0;
   8660 
   8661 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8662 		int hlen = offset + iphl;
   8663 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8664 
   8665 		if (__predict_false(m0->m_len <
   8666 				    (hlen + sizeof(struct tcphdr)))) {
   8667 			/*
   8668 			 * TCP/IP headers are not in the first mbuf; we need
   8669 			 * to do this the slow and painful way. Let's just
   8670 			 * hope this doesn't happen very often.
   8671 			 */
   8672 			struct tcphdr th;
   8673 
   8674 			WM_Q_EVCNT_INCR(txq, tsopain);
   8675 
   8676 			m_copydata(m0, hlen, sizeof(th), &th);
   8677 			if (v4) {
   8678 				struct ip ip;
   8679 
   8680 				m_copydata(m0, offset, sizeof(ip), &ip);
   8681 				ip.ip_len = 0;
   8682 				m_copyback(m0,
   8683 				    offset + offsetof(struct ip, ip_len),
   8684 				    sizeof(ip.ip_len), &ip.ip_len);
   8685 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8686 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8687 			} else {
   8688 				struct ip6_hdr ip6;
   8689 
   8690 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8691 				ip6.ip6_plen = 0;
   8692 				m_copyback(m0,
   8693 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8694 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8695 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8696 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8697 			}
   8698 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8699 			    sizeof(th.th_sum), &th.th_sum);
   8700 
   8701 			hlen += th.th_off << 2;
   8702 		} else {
   8703 			/*
   8704 			 * TCP/IP headers are in the first mbuf; we can do
   8705 			 * this the easy way.
   8706 			 */
   8707 			struct tcphdr *th;
   8708 
   8709 			if (v4) {
   8710 				struct ip *ip =
   8711 				    (void *)(mtod(m0, char *) + offset);
   8712 				th = (void *)(mtod(m0, char *) + hlen);
   8713 
   8714 				ip->ip_len = 0;
   8715 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8716 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8717 			} else {
   8718 				struct ip6_hdr *ip6 =
   8719 				    (void *)(mtod(m0, char *) + offset);
   8720 				th = (void *)(mtod(m0, char *) + hlen);
   8721 
   8722 				ip6->ip6_plen = 0;
   8723 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8724 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8725 			}
   8726 			hlen += th->th_off << 2;
   8727 		}
   8728 
   8729 		if (v4) {
   8730 			WM_Q_EVCNT_INCR(txq, tso);
   8731 			cmdlen |= WTX_TCPIP_CMD_IP;
   8732 		} else {
   8733 			WM_Q_EVCNT_INCR(txq, tso6);
   8734 			ipcse = 0;
   8735 		}
   8736 		cmd |= WTX_TCPIP_CMD_TSE;
   8737 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8738 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8739 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8740 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8741 	}
   8742 
   8743 	/*
   8744 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8745 	 * offload feature, if we load the context descriptor, we
   8746 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8747 	 */
   8748 
   8749 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8750 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8751 	    WTX_TCPIP_IPCSE(ipcse);
   8752 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8753 		WM_Q_EVCNT_INCR(txq, ipsum);
   8754 		fields |= WTX_IXSM;
   8755 	}
   8756 
   8757 	offset += iphl;
   8758 
   8759 	if (m0->m_pkthdr.csum_flags &
   8760 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8761 		WM_Q_EVCNT_INCR(txq, tusum);
   8762 		fields |= WTX_TXSM;
   8763 		tucs = WTX_TCPIP_TUCSS(offset) |
   8764 		    WTX_TCPIP_TUCSO(offset +
   8765 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8766 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8767 	} else if ((m0->m_pkthdr.csum_flags &
   8768 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8769 		WM_Q_EVCNT_INCR(txq, tusum6);
   8770 		fields |= WTX_TXSM;
   8771 		tucs = WTX_TCPIP_TUCSS(offset) |
   8772 		    WTX_TCPIP_TUCSO(offset +
   8773 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8774 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8775 	} else {
   8776 		/* Just initialize it to a valid TCP context. */
   8777 		tucs = WTX_TCPIP_TUCSS(offset) |
   8778 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8779 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8780 	}
   8781 
   8782 	*cmdp = cmd;
   8783 	*fieldsp = fields;
   8784 
   8785 	/*
    8786 	 * We don't have to write a context descriptor for every packet,
    8787 	 * except on the 82574.  On the 82574, we must write a context
    8788 	 * descriptor for every packet when we use two descriptor queues.
   8789 	 *
   8790 	 * The 82574L can only remember the *last* context used
    8791 	 * regardless of the queue it was used for.  We cannot reuse
   8792 	 * contexts on this hardware platform and must generate a new
   8793 	 * context every time.  82574L hardware spec, section 7.2.6,
   8794 	 * second note.
   8795 	 */
   8796 	if (sc->sc_nqueues < 2) {
   8797 		/*
    8798 		 * Setting up a new checksum offload context for every
    8799 		 * frame takes a lot of processing time for the hardware.
    8800 		 * This also reduces performance a lot for small frames,
    8801 		 * so avoid it if the driver can reuse a previously
    8802 		 * configured checksum offload context.
    8803 		 * For TSO, in theory we could reuse the same TSO context
    8804 		 * only if the frame is of the same type (IP/TCP) and has
    8805 		 * the same MSS.  However, checking whether a frame has the
    8806 		 * same IP/TCP structure is hard, so just ignore that and
    8807 		 * always establish a new TSO context.
   8808 		 */
   8809 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8810 		    == 0) {
   8811 			if (txq->txq_last_hw_cmd == cmd &&
   8812 			    txq->txq_last_hw_fields == fields &&
   8813 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8814 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8815 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8816 				return;
   8817 			}
   8818 		}
   8819 
   8820 		txq->txq_last_hw_cmd = cmd;
   8821 		txq->txq_last_hw_fields = fields;
   8822 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8823 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8824 	}
   8825 
   8826 	/* Fill in the context descriptor. */
   8827 	t = (struct livengood_tcpip_ctxdesc *)
   8828 	    &txq->txq_descs[txq->txq_next];
   8829 	t->tcpip_ipcs = htole32(ipcs);
   8830 	t->tcpip_tucs = htole32(tucs);
   8831 	t->tcpip_cmdlen = htole32(cmdlen);
   8832 	t->tcpip_seg = htole32(seg);
   8833 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8834 
   8835 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8836 	txs->txs_ndesc++;
   8837 }
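
/*
 * Illustrative sketch (not part of the driver): the IPv4 pseudo-header
 * seed that the in_cksum_phdr() call in wm_tx_offload() is used to
 * compute, assuming the usual folded, non-inverted ones-complement sum.
 * For TSO the TCP checksum field is seeded with the sum of the source
 * address, destination address and protocol, with the length taken as
 * zero; the hardware then folds in the per-segment length and payload.
 */
#if 0
#include <stdint.h>

static uint16_t
pseudo_hdr_seed(uint32_t src, uint32_t dst, uint16_t proto)
{
	uint32_t sum;

	/* Sum the pseudo-header as 16-bit words; the length word is 0. */
	sum = (src >> 16) + (src & 0xffff)
	    + (dst >> 16) + (dst & 0xffff)
	    + proto;
	/* Fold the carries back in (ones-complement addition). */
	while (sum > 0xffff)
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}
#endif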
   8838 
   8839 static inline int
   8840 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8841 {
   8842 	struct wm_softc *sc = ifp->if_softc;
   8843 	u_int cpuid = cpu_index(curcpu());
   8844 
   8845 	/*
    8846 	 * Currently, a simple distribution strategy.
    8847 	 * TODO:
    8848 	 * Distribute by flowid (RSS hash value).
   8849 	 */
   8850 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8851 }
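
/*
 * Worked example (illustrative) for wm_select_txqueue(): with ncpu = 8,
 * sc_affinity_offset = 2 and sc_nqueues = 4, CPU 1 maps to
 * ((1 + 8 - 2) % 8) % 4 = 7 % 4 = 3, so consecutive CPUs are spread
 * round-robin across the transmit queues.
 */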
   8852 
   8853 static inline bool
   8854 wm_linkdown_discard(struct wm_txqueue *txq)
   8855 {
   8856 
   8857 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8858 		return true;
   8859 
   8860 	return false;
   8861 }
   8862 
   8863 /*
   8864  * wm_start:		[ifnet interface function]
   8865  *
   8866  *	Start packet transmission on the interface.
   8867  */
   8868 static void
   8869 wm_start(struct ifnet *ifp)
   8870 {
   8871 	struct wm_softc *sc = ifp->if_softc;
   8872 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8873 
   8874 	KASSERT(if_is_mpsafe(ifp));
   8875 	/*
   8876 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8877 	 */
   8878 
   8879 	mutex_enter(txq->txq_lock);
   8880 	if (!txq->txq_stopping)
   8881 		wm_start_locked(ifp);
   8882 	mutex_exit(txq->txq_lock);
   8883 }
   8884 
   8885 static void
   8886 wm_start_locked(struct ifnet *ifp)
   8887 {
   8888 	struct wm_softc *sc = ifp->if_softc;
   8889 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8890 
   8891 	wm_send_common_locked(ifp, txq, false);
   8892 }
   8893 
   8894 static int
   8895 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8896 {
   8897 	int qid;
   8898 	struct wm_softc *sc = ifp->if_softc;
   8899 	struct wm_txqueue *txq;
   8900 
   8901 	qid = wm_select_txqueue(ifp, m);
   8902 	txq = &sc->sc_queue[qid].wmq_txq;
   8903 
   8904 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8905 		m_freem(m);
   8906 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8907 		return ENOBUFS;
   8908 	}
   8909 
   8910 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8911 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8912 	if (m->m_flags & M_MCAST)
   8913 		if_statinc_ref(nsr, if_omcasts);
   8914 	IF_STAT_PUTREF(ifp);
   8915 
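	/*
	 * mutex_tryenter() is sufficient here: if the lock is busy, the
	 * packet just queued on txq_interq will be drained by the current
	 * lock holder or by a later call into the transmit path.
	 */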
   8916 	if (mutex_tryenter(txq->txq_lock)) {
   8917 		if (!txq->txq_stopping)
   8918 			wm_transmit_locked(ifp, txq);
   8919 		mutex_exit(txq->txq_lock);
   8920 	}
   8921 
   8922 	return 0;
   8923 }
   8924 
   8925 static void
   8926 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8927 {
   8928 
   8929 	wm_send_common_locked(ifp, txq, true);
   8930 }
   8931 
   8932 static void
   8933 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8934     bool is_transmit)
   8935 {
   8936 	struct wm_softc *sc = ifp->if_softc;
   8937 	struct mbuf *m0;
   8938 	struct wm_txsoft *txs;
   8939 	bus_dmamap_t dmamap;
   8940 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8941 	bus_addr_t curaddr;
   8942 	bus_size_t seglen, curlen;
   8943 	uint32_t cksumcmd;
   8944 	uint8_t cksumfields;
   8945 	bool remap = true;
   8946 
   8947 	KASSERT(mutex_owned(txq->txq_lock));
   8948 	KASSERT(!txq->txq_stopping);
   8949 
   8950 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8951 		return;
   8952 
   8953 	if (__predict_false(wm_linkdown_discard(txq))) {
   8954 		do {
   8955 			if (is_transmit)
   8956 				m0 = pcq_get(txq->txq_interq);
   8957 			else
   8958 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8959 			/*
    8960 			 * Increment the output packet counter as if the
    8961 			 * link-down PHY had transmitted and discarded it.
   8962 			 */
   8963 			if (m0 != NULL) {
   8964 				if_statinc(ifp, if_opackets);
   8965 				m_freem(m0);
   8966 			}
   8967 		} while (m0 != NULL);
   8968 		return;
   8969 	}
   8970 
   8971 	/* Remember the previous number of free descriptors. */
   8972 	ofree = txq->txq_free;
   8973 
   8974 	/*
   8975 	 * Loop through the send queue, setting up transmit descriptors
   8976 	 * until we drain the queue, or use up all available transmit
   8977 	 * descriptors.
   8978 	 */
   8979 	for (;;) {
   8980 		m0 = NULL;
   8981 
   8982 		/* Get a work queue entry. */
   8983 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8984 			wm_txeof(txq, UINT_MAX);
   8985 			if (txq->txq_sfree == 0) {
   8986 				DPRINTF(sc, WM_DEBUG_TX,
   8987 				    ("%s: TX: no free job descriptors\n",
   8988 					device_xname(sc->sc_dev)));
   8989 				WM_Q_EVCNT_INCR(txq, txsstall);
   8990 				break;
   8991 			}
   8992 		}
   8993 
   8994 		/* Grab a packet off the queue. */
   8995 		if (is_transmit)
   8996 			m0 = pcq_get(txq->txq_interq);
   8997 		else
   8998 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8999 		if (m0 == NULL)
   9000 			break;
   9001 
   9002 		DPRINTF(sc, WM_DEBUG_TX,
   9003 		    ("%s: TX: have packet to transmit: %p\n",
   9004 			device_xname(sc->sc_dev), m0));
   9005 
   9006 		txs = &txq->txq_soft[txq->txq_snext];
   9007 		dmamap = txs->txs_dmamap;
   9008 
   9009 		use_tso = (m0->m_pkthdr.csum_flags &
   9010 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   9011 
   9012 		/*
   9013 		 * So says the Linux driver:
   9014 		 * The controller does a simple calculation to make sure
   9015 		 * there is enough room in the FIFO before initiating the
   9016 		 * DMA for each buffer. The calc is:
   9017 		 *	4 = ceil(buffer len / MSS)
   9018 		 * To make sure we don't overrun the FIFO, adjust the max
   9019 		 * buffer len if the MSS drops.
   9020 		 */
   9021 		dmamap->dm_maxsegsz =
   9022 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   9023 		    ? m0->m_pkthdr.segsz << 2
   9024 		    : WTX_MAX_LEN;
   9025 
   9026 		/*
   9027 		 * Load the DMA map.  If this fails, the packet either
   9028 		 * didn't fit in the allotted number of segments, or we
   9029 		 * were short on resources.  For the too-many-segments
   9030 		 * case, we simply report an error and drop the packet,
   9031 		 * since we can't sanely copy a jumbo packet to a single
   9032 		 * buffer.
   9033 		 */
   9034 retry:
   9035 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9036 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9037 		if (__predict_false(error)) {
   9038 			if (error == EFBIG) {
   9039 				if (remap == true) {
   9040 					struct mbuf *m;
   9041 
   9042 					remap = false;
   9043 					m = m_defrag(m0, M_NOWAIT);
   9044 					if (m != NULL) {
   9045 						WM_Q_EVCNT_INCR(txq, defrag);
   9046 						m0 = m;
   9047 						goto retry;
   9048 					}
   9049 				}
   9050 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9051 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9052 				    "DMA segments, dropping...\n",
   9053 				    device_xname(sc->sc_dev));
   9054 				wm_dump_mbuf_chain(sc, m0);
   9055 				m_freem(m0);
   9056 				continue;
   9057 			}
   9058 			/* Short on resources, just stop for now. */
   9059 			DPRINTF(sc, WM_DEBUG_TX,
   9060 			    ("%s: TX: dmamap load failed: %d\n",
   9061 				device_xname(sc->sc_dev), error));
   9062 			break;
   9063 		}
   9064 
   9065 		segs_needed = dmamap->dm_nsegs;
   9066 		if (use_tso) {
   9067 			/* For sentinel descriptor; see below. */
   9068 			segs_needed++;
   9069 		}
   9070 
   9071 		/*
   9072 		 * Ensure we have enough descriptors free to describe
   9073 		 * the packet. Note, we always reserve one descriptor
   9074 		 * at the end of the ring due to the semantics of the
   9075 		 * TDT register, plus one more in the event we need
   9076 		 * to load offload context.
   9077 		 */
   9078 		if (segs_needed > txq->txq_free - 2) {
   9079 			/*
   9080 			 * Not enough free descriptors to transmit this
   9081 			 * packet.  We haven't committed anything yet,
   9082 			 * so just unload the DMA map, put the packet
    9083 			 * back on the queue, and punt. Notify the upper
   9084 			 * layer that there are no more slots left.
   9085 			 */
   9086 			DPRINTF(sc, WM_DEBUG_TX,
   9087 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9088 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9089 				segs_needed, txq->txq_free - 1));
   9090 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9091 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9092 			WM_Q_EVCNT_INCR(txq, txdstall);
   9093 			break;
   9094 		}
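
		/*
		 * Worked example (illustrative): with txq_free = 10 and a
		 * TSO packet mapped to 9 DMA segments, segs_needed =
		 * 9 + 1 (sentinel) = 10 > 10 - 2, so the packet would be
		 * deferred by the check above until wm_txeof() reclaims
		 * descriptors.
		 */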
   9095 
   9096 		/*
   9097 		 * Check for 82547 Tx FIFO bug. We need to do this
   9098 		 * once we know we can transmit the packet, since we
   9099 		 * do some internal FIFO space accounting here.
   9100 		 */
   9101 		if (sc->sc_type == WM_T_82547 &&
   9102 		    wm_82547_txfifo_bugchk(sc, m0)) {
   9103 			DPRINTF(sc, WM_DEBUG_TX,
   9104 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   9105 				device_xname(sc->sc_dev)));
   9106 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9107 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9108 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   9109 			break;
   9110 		}
   9111 
   9112 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9113 
   9114 		DPRINTF(sc, WM_DEBUG_TX,
   9115 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9116 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9117 
   9118 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9119 
   9120 		/*
   9121 		 * Store a pointer to the packet so that we can free it
   9122 		 * later.
   9123 		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
   9128 		 */
   9129 		txs->txs_mbuf = m0;
   9130 		txs->txs_firstdesc = txq->txq_next;
   9131 		txs->txs_ndesc = segs_needed;
   9132 
   9133 		/* Set up offload parameters for this packet. */
   9134 		if (m0->m_pkthdr.csum_flags &
   9135 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9136 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9137 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9138 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9139 		} else {
   9140 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9141 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9142 			cksumcmd = 0;
   9143 			cksumfields = 0;
   9144 		}
   9145 
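		/*
		 * WTX_CMD_IDE enables the transmit interrupt delay and
		 * WTX_CMD_IFCS has the chip insert the FCS; these apply
		 * to every descriptor of the packet, while EOP/RS are
		 * added only on the last descriptor below.
		 */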
   9146 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9147 
   9148 		/* Sync the DMA map. */
   9149 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9150 		    BUS_DMASYNC_PREWRITE);
   9151 
   9152 		/* Initialize the transmit descriptor. */
   9153 		for (nexttx = txq->txq_next, seg = 0;
   9154 		     seg < dmamap->dm_nsegs; seg++) {
   9155 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9156 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9157 			     seglen != 0;
   9158 			     curaddr += curlen, seglen -= curlen,
   9159 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9160 				curlen = seglen;
   9161 
   9162 				/*
   9163 				 * So says the Linux driver:
   9164 				 * Work around for premature descriptor
   9165 				 * write-backs in TSO mode.  Append a
   9166 				 * 4-byte sentinel descriptor.
   9167 				 */
   9168 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9169 				    curlen > 8)
   9170 					curlen -= 4;
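					/*
					 * Trimming 4 bytes here leaves a
					 * 4-byte remainder in seglen, so
					 * the loop emits one extra 4-byte
					 * descriptor: that extra descriptor
					 * is the sentinel.
					 */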
   9171 
   9172 				wm_set_dma_addr(
   9173 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9174 				txq->txq_descs[nexttx].wtx_cmdlen
   9175 				    = htole32(cksumcmd | curlen);
   9176 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9177 				    = 0;
   9178 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9179 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9181 				lasttx = nexttx;
   9182 
   9183 				DPRINTF(sc, WM_DEBUG_TX,
   9184 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9185 					"len %#04zx\n",
   9186 					device_xname(sc->sc_dev), nexttx,
   9187 					(uint64_t)curaddr, curlen));
   9188 			}
   9189 		}
   9190 
   9191 		KASSERT(lasttx != -1);
   9192 
   9193 		/*
   9194 		 * Set up the command byte on the last descriptor of
   9195 		 * the packet. If we're in the interrupt delay window,
   9196 		 * delay the interrupt.
   9197 		 */
   9198 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9199 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9200 
   9201 		/*
   9202 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9203 		 * up the descriptor to encapsulate the packet for us.
   9204 		 *
   9205 		 * This is only valid on the last descriptor of the packet.
   9206 		 */
   9207 		if (vlan_has_tag(m0)) {
   9208 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9209 			    htole32(WTX_CMD_VLE);
   9210 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9211 			    = htole16(vlan_get_tag(m0));
   9212 		}
   9213 
   9214 		txs->txs_lastdesc = lasttx;
   9215 
   9216 		DPRINTF(sc, WM_DEBUG_TX,
   9217 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9218 			device_xname(sc->sc_dev),
   9219 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9220 
   9221 		/* Sync the descriptors we're using. */
   9222 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9223 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9224 
   9225 		/* Give the packet to the chip. */
   9226 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9227 
   9228 		DPRINTF(sc, WM_DEBUG_TX,
   9229 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9230 
   9231 		DPRINTF(sc, WM_DEBUG_TX,
   9232 		    ("%s: TX: finished transmitting packet, job %d\n",
   9233 			device_xname(sc->sc_dev), txq->txq_snext));
   9234 
   9235 		/* Advance the tx pointer. */
   9236 		txq->txq_free -= txs->txs_ndesc;
   9237 		txq->txq_next = nexttx;
   9238 
   9239 		txq->txq_sfree--;
   9240 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9241 
   9242 		/* Pass the packet to any BPF listeners. */
   9243 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9244 	}
   9245 
   9246 	if (m0 != NULL) {
   9247 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9248 		WM_Q_EVCNT_INCR(txq, descdrop);
   9249 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9250 			__func__));
   9251 		m_freem(m0);
   9252 	}
   9253 
   9254 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9255 		/* No more slots; notify upper layer. */
   9256 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9257 	}
   9258 
   9259 	if (txq->txq_free != ofree) {
   9260 		/* Set a watchdog timer in case the chip flakes out. */
   9261 		txq->txq_lastsent = time_uptime;
   9262 		txq->txq_sending = true;
   9263 	}
   9264 }
   9265 
   9266 /*
   9267  * wm_nq_tx_offload:
   9268  *
   9269  *	Set up TCP/IP checksumming parameters for the
   9270  *	specified packet, for NEWQUEUE devices
   9271  */
   9272 static void
   9273 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9274     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9275 {
   9276 	struct mbuf *m0 = txs->txs_mbuf;
   9277 	uint32_t vl_len, mssidx, cmdc;
   9278 	struct ether_header *eh;
   9279 	int offset, iphl;
   9280 
   9281 	/*
   9282 	 * XXX It would be nice if the mbuf pkthdr had offset
   9283 	 * fields for the protocol headers.
   9284 	 */
   9285 	*cmdlenp = 0;
   9286 	*fieldsp = 0;
   9287 
   9288 	eh = mtod(m0, struct ether_header *);
   9289 	switch (htons(eh->ether_type)) {
   9290 	case ETHERTYPE_IP:
   9291 	case ETHERTYPE_IPV6:
   9292 		offset = ETHER_HDR_LEN;
   9293 		break;
   9294 
   9295 	case ETHERTYPE_VLAN:
   9296 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9297 		break;
   9298 
   9299 	default:
   9300 		/* Don't support this protocol or encapsulation. */
   9301 		*do_csum = false;
   9302 		return;
   9303 	}
   9304 	*do_csum = true;
   9305 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9306 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
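	/*
	 * *cmdlenp carries flags for the advanced data descriptors;
	 * cmdc accumulates flags for the separate context descriptor
	 * written out at the end of this function.
	 */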
   9307 
   9308 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9309 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9310 
   9311 	if ((m0->m_pkthdr.csum_flags &
   9312 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9313 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9314 	} else {
   9315 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9316 	}
   9317 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9318 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   9319 
   9320 	if (vlan_has_tag(m0)) {
   9321 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9322 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9323 		*cmdlenp |= NQTX_CMD_VLE;
   9324 	}
   9325 
   9326 	mssidx = 0;
   9327 
   9328 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
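		/*
		 * For TSO the chip regenerates the IP length and TCP
		 * checksum for each segment it emits, so zero the IP
		 * length field and pre-seed th_sum with the pseudo-header
		 * checksum computed over a zero length, as done below for
		 * both the slow (headers split across mbufs) and the fast
		 * path.
		 */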
   9329 		int hlen = offset + iphl;
   9330 		int tcp_hlen;
   9331 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9332 
   9333 		if (__predict_false(m0->m_len <
   9334 				    (hlen + sizeof(struct tcphdr)))) {
   9335 			/*
   9336 			 * TCP/IP headers are not in the first mbuf; we need
   9337 			 * to do this the slow and painful way. Let's just
   9338 			 * hope this doesn't happen very often.
   9339 			 */
   9340 			struct tcphdr th;
   9341 
   9342 			WM_Q_EVCNT_INCR(txq, tsopain);
   9343 
   9344 			m_copydata(m0, hlen, sizeof(th), &th);
   9345 			if (v4) {
   9346 				struct ip ip;
   9347 
   9348 				m_copydata(m0, offset, sizeof(ip), &ip);
   9349 				ip.ip_len = 0;
   9350 				m_copyback(m0,
   9351 				    offset + offsetof(struct ip, ip_len),
   9352 				    sizeof(ip.ip_len), &ip.ip_len);
   9353 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9354 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9355 			} else {
   9356 				struct ip6_hdr ip6;
   9357 
   9358 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9359 				ip6.ip6_plen = 0;
   9360 				m_copyback(m0,
   9361 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9362 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9363 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9364 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9365 			}
   9366 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9367 			    sizeof(th.th_sum), &th.th_sum);
   9368 
   9369 			tcp_hlen = th.th_off << 2;
   9370 		} else {
   9371 			/*
   9372 			 * TCP/IP headers are in the first mbuf; we can do
   9373 			 * this the easy way.
   9374 			 */
   9375 			struct tcphdr *th;
   9376 
   9377 			if (v4) {
   9378 				struct ip *ip =
   9379 				    (void *)(mtod(m0, char *) + offset);
   9380 				th = (void *)(mtod(m0, char *) + hlen);
   9381 
   9382 				ip->ip_len = 0;
   9383 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9384 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9385 			} else {
   9386 				struct ip6_hdr *ip6 =
   9387 				    (void *)(mtod(m0, char *) + offset);
   9388 				th = (void *)(mtod(m0, char *) + hlen);
   9389 
   9390 				ip6->ip6_plen = 0;
   9391 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9392 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9393 			}
   9394 			tcp_hlen = th->th_off << 2;
   9395 		}
   9396 		hlen += tcp_hlen;
   9397 		*cmdlenp |= NQTX_CMD_TSE;
   9398 
   9399 		if (v4) {
   9400 			WM_Q_EVCNT_INCR(txq, tso);
   9401 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9402 		} else {
   9403 			WM_Q_EVCNT_INCR(txq, tso6);
   9404 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9405 		}
   9406 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9407 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
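		/*
		 * The MSS and L4 header length ride to the chip in the
		 * context descriptor (mssidx) so it can slice the TSO
		 * payload into MSS-sized frames.
		 */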
   9408 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9409 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9410 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9411 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9412 	} else {
   9413 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9414 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9415 	}
   9416 
   9417 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9418 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9419 		cmdc |= NQTXC_CMD_IP4;
   9420 	}
   9421 
   9422 	if (m0->m_pkthdr.csum_flags &
   9423 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9424 		WM_Q_EVCNT_INCR(txq, tusum);
   9425 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9426 			cmdc |= NQTXC_CMD_TCP;
   9427 		else
   9428 			cmdc |= NQTXC_CMD_UDP;
   9429 
   9430 		cmdc |= NQTXC_CMD_IP4;
   9431 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9432 	}
   9433 	if (m0->m_pkthdr.csum_flags &
   9434 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9435 		WM_Q_EVCNT_INCR(txq, tusum6);
   9436 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9437 			cmdc |= NQTXC_CMD_TCP;
   9438 		else
   9439 			cmdc |= NQTXC_CMD_UDP;
   9440 
   9441 		cmdc |= NQTXC_CMD_IP6;
   9442 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9443 	}
   9444 
   9445 	/*
	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354, I210
	 * and I211) we don't have to write a context descriptor for every
	 * packet; writing one per Tx queue is enough.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
   9452 	 */
   9453 	/* Fill in the context descriptor. */
   9454 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9455 	    htole32(vl_len);
   9456 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9457 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9458 	    htole32(cmdc);
   9459 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9460 	    htole32(mssidx);
   9461 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9462 	DPRINTF(sc, WM_DEBUG_TX,
   9463 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9464 		txq->txq_next, 0, vl_len));
   9465 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9466 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9467 	txs->txs_ndesc++;
   9468 }
   9469 
   9470 /*
   9471  * wm_nq_start:		[ifnet interface function]
   9472  *
   9473  *	Start packet transmission on the interface for NEWQUEUE devices
   9474  */
   9475 static void
   9476 wm_nq_start(struct ifnet *ifp)
   9477 {
   9478 	struct wm_softc *sc = ifp->if_softc;
   9479 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9480 
   9481 	KASSERT(if_is_mpsafe(ifp));
   9482 	/*
   9483 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9484 	 */
   9485 
   9486 	mutex_enter(txq->txq_lock);
   9487 	if (!txq->txq_stopping)
   9488 		wm_nq_start_locked(ifp);
   9489 	mutex_exit(txq->txq_lock);
   9490 }
   9491 
   9492 static void
   9493 wm_nq_start_locked(struct ifnet *ifp)
   9494 {
   9495 	struct wm_softc *sc = ifp->if_softc;
   9496 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9497 
   9498 	wm_nq_send_common_locked(ifp, txq, false);
   9499 }
   9500 
   9501 static int
   9502 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9503 {
   9504 	int qid;
   9505 	struct wm_softc *sc = ifp->if_softc;
   9506 	struct wm_txqueue *txq;
   9507 
   9508 	qid = wm_select_txqueue(ifp, m);
   9509 	txq = &sc->sc_queue[qid].wmq_txq;
   9510 
   9511 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9512 		m_freem(m);
   9513 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9514 		return ENOBUFS;
   9515 	}
   9516 
   9517 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9518 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9519 	if (m->m_flags & M_MCAST)
   9520 		if_statinc_ref(nsr, if_omcasts);
   9521 	IF_STAT_PUTREF(ifp);
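	/*
	 * Note that if_obytes/if_omcasts are charged here at enqueue
	 * time, while if_opackets is charged when wm_txeof() reclaims
	 * the completed job.
	 */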
   9522 
	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is also
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
	 * either.
	 */
   9534 	if (mutex_tryenter(txq->txq_lock)) {
   9535 		if (!txq->txq_stopping)
   9536 			wm_nq_transmit_locked(ifp, txq);
   9537 		mutex_exit(txq->txq_lock);
   9538 	}
   9539 
   9540 	return 0;
   9541 }
   9542 
   9543 static void
   9544 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9545 {
   9546 
   9547 	wm_nq_send_common_locked(ifp, txq, true);
   9548 }
   9549 
   9550 static void
   9551 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9552     bool is_transmit)
   9553 {
   9554 	struct wm_softc *sc = ifp->if_softc;
   9555 	struct mbuf *m0;
   9556 	struct wm_txsoft *txs;
   9557 	bus_dmamap_t dmamap;
   9558 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9559 	bool do_csum, sent;
   9560 	bool remap = true;
   9561 
   9562 	KASSERT(mutex_owned(txq->txq_lock));
   9563 	KASSERT(!txq->txq_stopping);
   9564 
   9565 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9566 		return;
   9567 
   9568 	if (__predict_false(wm_linkdown_discard(txq))) {
   9569 		do {
   9570 			if (is_transmit)
   9571 				m0 = pcq_get(txq->txq_interq);
   9572 			else
   9573 				IFQ_DEQUEUE(&ifp->if_snd, m0);
			/*
			 * Count the packet as successfully sent even though
			 * it is being discarded because the link is down.
			 */
   9578 			if (m0 != NULL) {
   9579 				if_statinc(ifp, if_opackets);
   9580 				m_freem(m0);
   9581 			}
   9582 		} while (m0 != NULL);
   9583 		return;
   9584 	}
   9585 
   9586 	sent = false;
   9587 
   9588 	/*
   9589 	 * Loop through the send queue, setting up transmit descriptors
   9590 	 * until we drain the queue, or use up all available transmit
   9591 	 * descriptors.
   9592 	 */
   9593 	for (;;) {
   9594 		m0 = NULL;
   9595 
   9596 		/* Get a work queue entry. */
   9597 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9598 			wm_txeof(txq, UINT_MAX);
   9599 			if (txq->txq_sfree == 0) {
   9600 				DPRINTF(sc, WM_DEBUG_TX,
   9601 				    ("%s: TX: no free job descriptors\n",
   9602 					device_xname(sc->sc_dev)));
   9603 				WM_Q_EVCNT_INCR(txq, txsstall);
   9604 				break;
   9605 			}
   9606 		}
   9607 
   9608 		/* Grab a packet off the queue. */
   9609 		if (is_transmit)
   9610 			m0 = pcq_get(txq->txq_interq);
   9611 		else
   9612 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9613 		if (m0 == NULL)
   9614 			break;
   9615 
   9616 		DPRINTF(sc, WM_DEBUG_TX,
   9617 		    ("%s: TX: have packet to transmit: %p\n",
   9618 			device_xname(sc->sc_dev), m0));
   9619 
   9620 		txs = &txq->txq_soft[txq->txq_snext];
   9621 		dmamap = txs->txs_dmamap;
   9622 
   9623 		/*
   9624 		 * Load the DMA map.  If this fails, the packet either
   9625 		 * didn't fit in the allotted number of segments, or we
   9626 		 * were short on resources.  For the too-many-segments
   9627 		 * case, we simply report an error and drop the packet,
   9628 		 * since we can't sanely copy a jumbo packet to a single
   9629 		 * buffer.
   9630 		 */
   9631 retry:
   9632 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9633 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9634 		if (__predict_false(error)) {
   9635 			if (error == EFBIG) {
   9636 				if (remap == true) {
   9637 					struct mbuf *m;
   9638 
   9639 					remap = false;
   9640 					m = m_defrag(m0, M_NOWAIT);
   9641 					if (m != NULL) {
   9642 						WM_Q_EVCNT_INCR(txq, defrag);
   9643 						m0 = m;
   9644 						goto retry;
   9645 					}
   9646 				}
   9647 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9648 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9649 				    "DMA segments, dropping...\n",
   9650 				    device_xname(sc->sc_dev));
   9651 				wm_dump_mbuf_chain(sc, m0);
   9652 				m_freem(m0);
   9653 				continue;
   9654 			}
   9655 			/* Short on resources, just stop for now. */
   9656 			DPRINTF(sc, WM_DEBUG_TX,
   9657 			    ("%s: TX: dmamap load failed: %d\n",
   9658 				device_xname(sc->sc_dev), error));
   9659 			break;
   9660 		}
   9661 
   9662 		segs_needed = dmamap->dm_nsegs;
   9663 
   9664 		/*
   9665 		 * Ensure we have enough descriptors free to describe
   9666 		 * the packet. Note, we always reserve one descriptor
   9667 		 * at the end of the ring due to the semantics of the
   9668 		 * TDT register, plus one more in the event we need
   9669 		 * to load offload context.
   9670 		 */
   9671 		if (segs_needed > txq->txq_free - 2) {
   9672 			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed anything yet, so
			 * just unload the DMA map, drop the packet, and
			 * punt. Notify the upper layer that there are no
			 * more slots left.
   9678 			 */
   9679 			DPRINTF(sc, WM_DEBUG_TX,
   9680 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9681 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9682 				segs_needed, txq->txq_free - 1));
   9683 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9684 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9685 			WM_Q_EVCNT_INCR(txq, txdstall);
   9686 			break;
   9687 		}
   9688 
   9689 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9690 
   9691 		DPRINTF(sc, WM_DEBUG_TX,
   9692 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9693 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9694 
   9695 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9696 
   9697 		/*
   9698 		 * Store a pointer to the packet so that we can free it
   9699 		 * later.
   9700 		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
   9705 		 */
   9706 		txs->txs_mbuf = m0;
   9707 		txs->txs_firstdesc = txq->txq_next;
   9708 		txs->txs_ndesc = segs_needed;
   9709 
   9710 		/* Set up offload parameters for this packet. */
   9711 		uint32_t cmdlen, fields, dcmdlen;
   9712 		if (m0->m_pkthdr.csum_flags &
   9713 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9714 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9715 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9716 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9717 			    &do_csum);
   9718 		} else {
   9719 			do_csum = false;
   9720 			cmdlen = 0;
   9721 			fields = 0;
   9722 		}
   9723 
   9724 		/* Sync the DMA map. */
   9725 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9726 		    BUS_DMASYNC_PREWRITE);
   9727 
   9728 		/* Initialize the first transmit descriptor. */
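		/*
		 * Packets with no offload work use a legacy descriptor;
		 * offloaded packets use an advanced data descriptor, whose
		 * matching context descriptor was already written by
		 * wm_nq_tx_offload() above.
		 */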
   9729 		nexttx = txq->txq_next;
   9730 		if (!do_csum) {
   9731 			/* Set up a legacy descriptor */
   9732 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9733 			    dmamap->dm_segs[0].ds_addr);
   9734 			txq->txq_descs[nexttx].wtx_cmdlen =
   9735 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9736 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9737 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9738 			if (vlan_has_tag(m0)) {
   9739 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9740 				    htole32(WTX_CMD_VLE);
   9741 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9742 				    htole16(vlan_get_tag(m0));
   9743 			} else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9745 
   9746 			dcmdlen = 0;
   9747 		} else {
   9748 			/* Set up an advanced data descriptor */
   9749 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9750 			    htole64(dmamap->dm_segs[0].ds_addr);
   9751 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9752 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9753 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9754 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9755 			    htole32(fields);
   9756 			DPRINTF(sc, WM_DEBUG_TX,
   9757 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9758 				device_xname(sc->sc_dev), nexttx,
   9759 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9760 			DPRINTF(sc, WM_DEBUG_TX,
   9761 			    ("\t 0x%08x%08x\n", fields,
   9762 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9763 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9764 		}
   9765 
   9766 		lasttx = nexttx;
   9767 		nexttx = WM_NEXTTX(txq, nexttx);
   9768 		/*
   9769 		 * Fill in the next descriptors. Legacy or advanced format
   9770 		 * is the same here.
   9771 		 */
   9772 		for (seg = 1; seg < dmamap->dm_nsegs;
   9773 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9774 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9775 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9776 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9777 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9778 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9779 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9780 			lasttx = nexttx;
   9781 
   9782 			DPRINTF(sc, WM_DEBUG_TX,
   9783 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9784 				device_xname(sc->sc_dev), nexttx,
   9785 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9786 				dmamap->dm_segs[seg].ds_len));
   9787 		}
   9788 
   9789 		KASSERT(lasttx != -1);
   9790 
   9791 		/*
   9792 		 * Set up the command byte on the last descriptor of
   9793 		 * the packet. If we're in the interrupt delay window,
   9794 		 * delay the interrupt.
   9795 		 */
   9796 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9797 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
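		/*
		 * The KASSERT above relies on EOP/RS occupying the same
		 * bit positions in the legacy and advanced layouts, so
		 * setting them through the legacy view works for both
		 * descriptor formats.
		 */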
   9798 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9799 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9800 
   9801 		txs->txs_lastdesc = lasttx;
   9802 
   9803 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9804 		    device_xname(sc->sc_dev),
   9805 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9806 
   9807 		/* Sync the descriptors we're using. */
   9808 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9809 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9810 
   9811 		/* Give the packet to the chip. */
   9812 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9813 		sent = true;
   9814 
   9815 		DPRINTF(sc, WM_DEBUG_TX,
   9816 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9817 
   9818 		DPRINTF(sc, WM_DEBUG_TX,
   9819 		    ("%s: TX: finished transmitting packet, job %d\n",
   9820 			device_xname(sc->sc_dev), txq->txq_snext));
   9821 
   9822 		/* Advance the tx pointer. */
   9823 		txq->txq_free -= txs->txs_ndesc;
   9824 		txq->txq_next = nexttx;
   9825 
   9826 		txq->txq_sfree--;
   9827 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9828 
   9829 		/* Pass the packet to any BPF listeners. */
   9830 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9831 	}
   9832 
   9833 	if (m0 != NULL) {
   9834 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9835 		WM_Q_EVCNT_INCR(txq, descdrop);
   9836 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9837 			__func__));
   9838 		m_freem(m0);
   9839 	}
   9840 
   9841 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9842 		/* No more slots; notify upper layer. */
   9843 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9844 	}
   9845 
   9846 	if (sent) {
   9847 		/* Set a watchdog timer in case the chip flakes out. */
   9848 		txq->txq_lastsent = time_uptime;
   9849 		txq->txq_sending = true;
   9850 	}
   9851 }
   9852 
   9853 static void
   9854 wm_deferred_start_locked(struct wm_txqueue *txq)
   9855 {
   9856 	struct wm_softc *sc = txq->txq_sc;
   9857 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9858 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9859 	int qid = wmq->wmq_id;
   9860 
   9861 	KASSERT(mutex_owned(txq->txq_lock));
   9862 	KASSERT(!txq->txq_stopping);
   9863 
   9864 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   9866 		if (qid == 0)
   9867 			wm_nq_start_locked(ifp);
   9868 		wm_nq_transmit_locked(ifp, txq);
   9869 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   9871 		if (qid == 0)
   9872 			wm_start_locked(ifp);
   9873 		wm_transmit_locked(ifp, txq);
   9874 	}
   9875 }
   9876 
   9877 /* Interrupt */
   9878 
   9879 /*
   9880  * wm_txeof:
   9881  *
   9882  *	Helper; handle transmit interrupts.
   9883  */
   9884 static bool
   9885 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9886 {
   9887 	struct wm_softc *sc = txq->txq_sc;
   9888 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9889 	struct wm_txsoft *txs;
   9890 	int count = 0;
   9891 	int i;
   9892 	uint8_t status;
   9893 	bool more = false;
   9894 
   9895 	KASSERT(mutex_owned(txq->txq_lock));
   9896 
   9897 	if (txq->txq_stopping)
   9898 		return false;
   9899 
   9900 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
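	/*
	 * The no-space flag is cleared optimistically here; the send
	 * path sets it again if the ring turns out to be still full.
	 */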
   9901 
   9902 	/*
   9903 	 * Go through the Tx list and free mbufs for those
   9904 	 * frames which have been transmitted.
   9905 	 */
   9906 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9907 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9908 		txs = &txq->txq_soft[i];
   9909 
   9910 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9911 			device_xname(sc->sc_dev), i));
   9912 
   9913 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9914 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9915 
   9916 		status =
   9917 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9918 		if ((status & WTX_ST_DD) == 0) {
   9919 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9920 			    BUS_DMASYNC_PREREAD);
   9921 			break;
   9922 		}
   9923 
   9924 		if (limit-- == 0) {
   9925 			more = true;
   9926 			DPRINTF(sc, WM_DEBUG_TX,
   9927 			    ("%s: TX: loop limited, job %d is not processed\n",
   9928 				device_xname(sc->sc_dev), i));
   9929 			break;
   9930 		}
   9931 
   9932 		count++;
   9933 		DPRINTF(sc, WM_DEBUG_TX,
   9934 		    ("%s: TX: job %d done: descs %d..%d\n",
   9935 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9936 		    txs->txs_lastdesc));
   9937 
   9938 #ifdef WM_EVENT_COUNTERS
   9939 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9940 			WM_Q_EVCNT_INCR(txq, underrun);
   9941 #endif /* WM_EVENT_COUNTERS */
   9942 
		/*
		 * The documentation for 82574 and newer devices says the
		 * status field has neither the EC (Excessive Collision) bit
		 * nor the LC (Late Collision) bit (both are reserved). See
		 * the "PCIe GbE Controller Open Source Software Developer's
		 * Manual", the 82574 datasheet and newer.
		 *
		 * XXX I saw the LC bit set on an I218 even though the media
		 * was full duplex, so the bit might have some other meaning
		 * ... (I have no documentation).
		 */
   9953 
   9954 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9955 		    && ((sc->sc_type < WM_T_82574)
   9956 			|| (sc->sc_type == WM_T_80003))) {
   9957 			if_statinc(ifp, if_oerrors);
   9958 			if (status & WTX_ST_LC)
   9959 				log(LOG_WARNING, "%s: late collision\n",
   9960 				    device_xname(sc->sc_dev));
   9961 			else if (status & WTX_ST_EC) {
   9962 				if_statadd(ifp, if_collisions,
   9963 				    TX_COLLISION_THRESHOLD + 1);
   9964 				log(LOG_WARNING, "%s: excessive collisions\n",
   9965 				    device_xname(sc->sc_dev));
   9966 			}
   9967 		} else
   9968 			if_statinc(ifp, if_opackets);
   9969 
   9970 		txq->txq_packets++;
   9971 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9972 
   9973 		txq->txq_free += txs->txs_ndesc;
   9974 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9975 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9976 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9977 		m_freem(txs->txs_mbuf);
   9978 		txs->txs_mbuf = NULL;
   9979 	}
   9980 
   9981 	/* Update the dirty transmit buffer pointer. */
   9982 	txq->txq_sdirty = i;
   9983 	DPRINTF(sc, WM_DEBUG_TX,
   9984 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9985 
   9986 	if (count != 0)
   9987 		rnd_add_uint32(&sc->rnd_source, count);
   9988 
   9989 	/*
   9990 	 * If there are no more pending transmissions, cancel the watchdog
   9991 	 * timer.
   9992 	 */
   9993 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9994 		txq->txq_sending = false;
   9995 
   9996 	return more;
   9997 }
   9998 
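/*
 * The wm_rxdesc_get_* accessors below hide the three Rx descriptor
 * layouts: the 82574 uses extended descriptors, NEWQUEUE (82575 and
 * later) devices use advanced descriptors, and all other devices use
 * the legacy format.
 */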
   9999 static inline uint32_t
   10000 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   10001 {
   10002 	struct wm_softc *sc = rxq->rxq_sc;
   10003 
   10004 	if (sc->sc_type == WM_T_82574)
   10005 		return EXTRXC_STATUS(
   10006 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   10007 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10008 		return NQRXC_STATUS(
   10009 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   10010 	else
   10011 		return rxq->rxq_descs[idx].wrx_status;
   10012 }
   10013 
   10014 static inline uint32_t
   10015 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   10016 {
   10017 	struct wm_softc *sc = rxq->rxq_sc;
   10018 
   10019 	if (sc->sc_type == WM_T_82574)
   10020 		return EXTRXC_ERROR(
   10021 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   10022 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10023 		return NQRXC_ERROR(
   10024 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   10025 	else
   10026 		return rxq->rxq_descs[idx].wrx_errors;
   10027 }
   10028 
   10029 static inline uint16_t
   10030 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   10031 {
   10032 	struct wm_softc *sc = rxq->rxq_sc;
   10033 
   10034 	if (sc->sc_type == WM_T_82574)
   10035 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   10036 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10037 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   10038 	else
   10039 		return rxq->rxq_descs[idx].wrx_special;
   10040 }
   10041 
   10042 static inline int
   10043 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   10044 {
   10045 	struct wm_softc *sc = rxq->rxq_sc;
   10046 
   10047 	if (sc->sc_type == WM_T_82574)
   10048 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   10049 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10050 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   10051 	else
   10052 		return rxq->rxq_descs[idx].wrx_len;
   10053 }
   10054 
   10055 #ifdef WM_DEBUG
   10056 static inline uint32_t
   10057 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   10058 {
   10059 	struct wm_softc *sc = rxq->rxq_sc;
   10060 
   10061 	if (sc->sc_type == WM_T_82574)
   10062 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   10063 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10064 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   10065 	else
   10066 		return 0;
   10067 }
   10068 
   10069 static inline uint8_t
   10070 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   10071 {
   10072 	struct wm_softc *sc = rxq->rxq_sc;
   10073 
   10074 	if (sc->sc_type == WM_T_82574)
   10075 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   10076 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10077 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   10078 	else
   10079 		return 0;
   10080 }
   10081 #endif /* WM_DEBUG */
   10082 
   10083 static inline bool
   10084 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   10085     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10086 {
   10087 
   10088 	if (sc->sc_type == WM_T_82574)
   10089 		return (status & ext_bit) != 0;
   10090 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10091 		return (status & nq_bit) != 0;
   10092 	else
   10093 		return (status & legacy_bit) != 0;
   10094 }
   10095 
   10096 static inline bool
   10097 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   10098     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10099 {
   10100 
   10101 	if (sc->sc_type == WM_T_82574)
   10102 		return (error & ext_bit) != 0;
   10103 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10104 		return (error & nq_bit) != 0;
   10105 	else
   10106 		return (error & legacy_bit) != 0;
   10107 }
   10108 
   10109 static inline bool
   10110 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   10111 {
   10112 
   10113 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10114 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   10115 		return true;
   10116 	else
   10117 		return false;
   10118 }
   10119 
   10120 static inline bool
   10121 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   10122 {
   10123 	struct wm_softc *sc = rxq->rxq_sc;
   10124 
   10125 	/* XXX missing error bit for newqueue? */
   10126 	if (wm_rxdesc_is_set_error(sc, errors,
   10127 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   10128 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   10129 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   10130 		NQRXC_ERROR_RXE)) {
   10131 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10132 		    EXTRXC_ERROR_SE, 0))
   10133 			log(LOG_WARNING, "%s: symbol error\n",
   10134 			    device_xname(sc->sc_dev));
   10135 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10136 		    EXTRXC_ERROR_SEQ, 0))
   10137 			log(LOG_WARNING, "%s: receive sequence error\n",
   10138 			    device_xname(sc->sc_dev));
   10139 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10140 		    EXTRXC_ERROR_CE, 0))
   10141 			log(LOG_WARNING, "%s: CRC error\n",
   10142 			    device_xname(sc->sc_dev));
   10143 		return true;
   10144 	}
   10145 
   10146 	return false;
   10147 }
   10148 
   10149 static inline bool
   10150 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10151 {
   10152 	struct wm_softc *sc = rxq->rxq_sc;
   10153 
   10154 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10155 		NQRXC_STATUS_DD)) {
   10156 		/* We have processed all of the receive descriptors. */
   10157 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10158 		return false;
   10159 	}
   10160 
   10161 	return true;
   10162 }
   10163 
   10164 static inline bool
   10165 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10166     uint16_t vlantag, struct mbuf *m)
   10167 {
   10168 
   10169 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10170 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10171 		vlan_set_tag(m, le16toh(vlantag));
   10172 	}
   10173 
   10174 	return true;
   10175 }
   10176 
   10177 static inline void
   10178 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10179     uint32_t errors, struct mbuf *m)
   10180 {
   10181 	struct wm_softc *sc = rxq->rxq_sc;
   10182 
   10183 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10184 		if (wm_rxdesc_is_set_status(sc, status,
   10185 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10186 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10187 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10188 			if (wm_rxdesc_is_set_error(sc, errors,
   10189 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10190 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10191 		}
   10192 		if (wm_rxdesc_is_set_status(sc, status,
   10193 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10194 			/*
   10195 			 * Note: we don't know if this was TCP or UDP,
   10196 			 * so we just set both bits, and expect the
   10197 			 * upper layers to deal.
   10198 			 */
   10199 			WM_Q_EVCNT_INCR(rxq, tusum);
   10200 			m->m_pkthdr.csum_flags |=
   10201 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10202 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10203 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10204 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10205 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10206 		}
   10207 	}
   10208 }
   10209 
   10210 /*
   10211  * wm_rxeof:
   10212  *
   10213  *	Helper; handle receive interrupts.
   10214  */
   10215 static bool
   10216 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10217 {
   10218 	struct wm_softc *sc = rxq->rxq_sc;
   10219 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10220 	struct wm_rxsoft *rxs;
   10221 	struct mbuf *m;
   10222 	int i, len;
   10223 	int count = 0;
   10224 	uint32_t status, errors;
   10225 	uint16_t vlantag;
   10226 	bool more = false;
   10227 
   10228 	KASSERT(mutex_owned(rxq->rxq_lock));
   10229 
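	/*
	 * Scan forward from rxq_ptr, the first descriptor not yet
	 * processed; the loop ends when it reaches a descriptor the
	 * chip has not completed (DD clear) or when the limit is hit.
	 */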
   10230 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10231 		rxs = &rxq->rxq_soft[i];
   10232 
   10233 		DPRINTF(sc, WM_DEBUG_RX,
   10234 		    ("%s: RX: checking descriptor %d\n",
   10235 			device_xname(sc->sc_dev), i));
   10236 		wm_cdrxsync(rxq, i,
   10237 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10238 
   10239 		status = wm_rxdesc_get_status(rxq, i);
   10240 		errors = wm_rxdesc_get_errors(rxq, i);
   10241 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10242 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10243 #ifdef WM_DEBUG
   10244 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10245 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10246 #endif
   10247 
   10248 		if (!wm_rxdesc_dd(rxq, i, status))
   10249 			break;
   10250 
   10251 		if (limit-- == 0) {
   10252 			more = true;
   10253 			DPRINTF(sc, WM_DEBUG_RX,
   10254 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10255 				device_xname(sc->sc_dev), i));
   10256 			break;
   10257 		}
   10258 
   10259 		count++;
   10260 		if (__predict_false(rxq->rxq_discard)) {
   10261 			DPRINTF(sc, WM_DEBUG_RX,
   10262 			    ("%s: RX: discarding contents of descriptor %d\n",
   10263 				device_xname(sc->sc_dev), i));
   10264 			wm_init_rxdesc(rxq, i);
   10265 			if (wm_rxdesc_is_eop(rxq, status)) {
   10266 				/* Reset our state. */
   10267 				DPRINTF(sc, WM_DEBUG_RX,
   10268 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10269 					device_xname(sc->sc_dev)));
   10270 				rxq->rxq_discard = 0;
   10271 			}
   10272 			continue;
   10273 		}
   10274 
   10275 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10276 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10277 
   10278 		m = rxs->rxs_mbuf;
   10279 
   10280 		/*
   10281 		 * Add a new receive buffer to the ring, unless of
   10282 		 * course the length is zero. Treat the latter as a
   10283 		 * failed mapping.
   10284 		 */
   10285 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10286 			/*
   10287 			 * Failed, throw away what we've done so
   10288 			 * far, and discard the rest of the packet.
   10289 			 */
   10290 			if_statinc(ifp, if_ierrors);
   10291 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10292 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10293 			wm_init_rxdesc(rxq, i);
   10294 			if (!wm_rxdesc_is_eop(rxq, status))
   10295 				rxq->rxq_discard = 1;
   10296 			if (rxq->rxq_head != NULL)
   10297 				m_freem(rxq->rxq_head);
   10298 			WM_RXCHAIN_RESET(rxq);
   10299 			DPRINTF(sc, WM_DEBUG_RX,
   10300 			    ("%s: RX: Rx buffer allocation failed, "
   10301 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10302 				rxq->rxq_discard ? " (discard)" : ""));
   10303 			continue;
   10304 		}
   10305 
   10306 		m->m_len = len;
   10307 		rxq->rxq_len += len;
   10308 		DPRINTF(sc, WM_DEBUG_RX,
   10309 		    ("%s: RX: buffer at %p len %d\n",
   10310 			device_xname(sc->sc_dev), m->m_data, len));
   10311 
   10312 		/* If this is not the end of the packet, keep looking. */
   10313 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10314 			WM_RXCHAIN_LINK(rxq, m);
   10315 			DPRINTF(sc, WM_DEBUG_RX,
   10316 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10317 				device_xname(sc->sc_dev), rxq->rxq_len));
   10318 			continue;
   10319 		}
   10320 
		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS (not all chips can be
		 * configured to strip it), so we normally need to trim it.
		 * The exceptions are the I35[04] and I21[01]: those chips
		 * have an erratum whereby the RCTL_SECRC bit in the RCTL
		 * register is always set, so the FCS is already stripped
		 * and we don't trim it. PCH2 and newer chips also do not
		 * include the FCS when jumbo frames are used, to work
		 * around an erratum. We may need to adjust the length of
		 * the previous mbuf in the chain if the current mbuf is
		 * too short.
		 */
   10332 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10333 			if (m->m_len < ETHER_CRC_LEN) {
   10334 				rxq->rxq_tail->m_len
   10335 				    -= (ETHER_CRC_LEN - m->m_len);
   10336 				m->m_len = 0;
   10337 			} else
   10338 				m->m_len -= ETHER_CRC_LEN;
   10339 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10340 		} else
   10341 			len = rxq->rxq_len;
   10342 
   10343 		WM_RXCHAIN_LINK(rxq, m);
   10344 
   10345 		*rxq->rxq_tailp = NULL;
   10346 		m = rxq->rxq_head;
   10347 
   10348 		WM_RXCHAIN_RESET(rxq);
   10349 
   10350 		DPRINTF(sc, WM_DEBUG_RX,
   10351 		    ("%s: RX: have entire packet, len -> %d\n",
   10352 			device_xname(sc->sc_dev), len));
   10353 
   10354 		/* If an error occurred, update stats and drop the packet. */
   10355 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10356 			m_freem(m);
   10357 			continue;
   10358 		}
   10359 
   10360 		/* No errors.  Receive the packet. */
   10361 		m_set_rcvif(m, ifp);
   10362 		m->m_pkthdr.len = len;
		/*
		 * TODO: we should save the rsshash and rsstype in this
		 * mbuf.
		 */
   10367 		DPRINTF(sc, WM_DEBUG_RX,
   10368 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10369 			device_xname(sc->sc_dev), rsstype, rsshash));
   10370 
   10371 		/*
   10372 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10373 		 * for us.  Associate the tag with the packet.
   10374 		 */
   10375 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10376 			continue;
   10377 
   10378 		/* Set up checksum info for this packet. */
   10379 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10380 
   10381 		rxq->rxq_packets++;
   10382 		rxq->rxq_bytes += len;
   10383 		/* Pass it on. */
   10384 		if_percpuq_enqueue(sc->sc_ipq, m);
   10385 
   10386 		if (rxq->rxq_stopping)
   10387 			break;
   10388 	}
   10389 	rxq->rxq_ptr = i;
   10390 
   10391 	if (count != 0)
   10392 		rnd_add_uint32(&sc->rnd_source, count);
   10393 
   10394 	DPRINTF(sc, WM_DEBUG_RX,
   10395 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10396 
   10397 	return more;
   10398 }
   10399 
   10400 /*
   10401  * wm_linkintr_gmii:
   10402  *
   10403  *	Helper; handle link interrupts for GMII.
   10404  */
   10405 static void
   10406 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10407 {
   10408 	device_t dev = sc->sc_dev;
   10409 	uint32_t status, reg;
   10410 	bool link;
   10411 	bool dopoll = true;
   10412 	int rv;
   10413 
   10414 	KASSERT(mutex_owned(sc->sc_core_lock));
   10415 
   10416 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10417 		__func__));
   10418 
   10419 	if ((icr & ICR_LSC) == 0) {
   10420 		if (icr & ICR_RXSEQ)
   10421 			DPRINTF(sc, WM_DEBUG_LINK,
   10422 			    ("%s: LINK Receive sequence error\n",
   10423 				device_xname(dev)));
   10424 		return;
   10425 	}
   10426 
   10427 	/* Link status changed */
   10428 	status = CSR_READ(sc, WMREG_STATUS);
   10429 	link = status & STATUS_LU;
   10430 	if (link) {
   10431 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10432 			device_xname(dev),
   10433 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10434 		if (wm_phy_need_linkdown_discard(sc)) {
   10435 			DPRINTF(sc, WM_DEBUG_LINK,
   10436 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10437 				device_xname(dev)));
   10438 			wm_clear_linkdown_discard(sc);
   10439 		}
   10440 	} else {
   10441 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10442 			device_xname(dev)));
   10443 		if (wm_phy_need_linkdown_discard(sc)) {
   10444 			DPRINTF(sc, WM_DEBUG_LINK,
   10445 			    ("%s: linkintr: Set linkdown discard flag\n",
   10446 				device_xname(dev)));
   10447 			wm_set_linkdown_discard(sc);
   10448 		}
   10449 	}
   10450 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10451 		wm_gig_downshift_workaround_ich8lan(sc);
   10452 
   10453 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10454 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10455 
   10456 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10457 		device_xname(dev)));
   10458 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   10459 		if (link) {
   10460 			/*
			 * To work around the problem, we have to wait
			 * several hundred milliseconds. The time depends
			 * on the environment. Wait 1 second to be safe.
   10464 			 */
   10465 			dopoll = false;
   10466 			getmicrotime(&sc->sc_linkup_delay_time);
   10467 			sc->sc_linkup_delay_time.tv_sec += 1;
   10468 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   10469 			/*
			 * Checking tv_sec only is enough here.
			 *
			 * Currently it's not required to clear the time;
			 * it is kept only so that one can see that the
			 * timer has stopped (for debugging).
   10475 			 */
   10476 
   10477 			sc->sc_linkup_delay_time.tv_sec = 0;
   10478 			sc->sc_linkup_delay_time.tv_usec = 0;
   10479 		}
   10480 	}
   10481 
   10482 	/*
   10483 	 * Call mii_pollstat().
   10484 	 *
	 * Some (not all) systems using I35[04] or I21[01] don't send packets
	 * soon after link-up: the MAC sends a packet to the PHY and no error
	 * is observed. This behavior causes gratuitous ARP and/or IPv6 DAD
	 * packets to be silently dropped. To avoid this problem, don't call
	 * mii_pollstat() here, which would send a LINK_STATE_UP notification
	 * to the upper layer. Instead, mii_pollstat() will be called in
	 * wm_gmii_mediastatus(), or mii_tick() will be called in wm_tick().
   10492 	 */
   10493 	if (dopoll)
   10494 		mii_pollstat(&sc->sc_mii);
   10495 
   10496 	/* Do some workarounds soon after link status is changed. */
   10497 
   10498 	if (sc->sc_type == WM_T_82543) {
   10499 		int miistatus, active;
   10500 
   10501 		/*
   10502 		 * With 82543, we need to force speed and
   10503 		 * duplex on the MAC equal to what the PHY
   10504 		 * speed and duplex configuration is.
   10505 		 */
   10506 		miistatus = sc->sc_mii.mii_media_status;
   10507 
   10508 		if (miistatus & IFM_ACTIVE) {
   10509 			active = sc->sc_mii.mii_media_active;
   10510 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10511 			switch (IFM_SUBTYPE(active)) {
   10512 			case IFM_10_T:
   10513 				sc->sc_ctrl |= CTRL_SPEED_10;
   10514 				break;
   10515 			case IFM_100_TX:
   10516 				sc->sc_ctrl |= CTRL_SPEED_100;
   10517 				break;
   10518 			case IFM_1000_T:
   10519 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10520 				break;
   10521 			default:
   10522 				/*
   10523 				 * Fiber?
				 * Should not enter here.
   10525 				 */
   10526 				device_printf(dev, "unknown media (%x)\n",
   10527 				    active);
   10528 				break;
   10529 			}
   10530 			if (active & IFM_FDX)
   10531 				sc->sc_ctrl |= CTRL_FD;
   10532 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10533 		}
   10534 	} else if (sc->sc_type == WM_T_PCH) {
   10535 		wm_k1_gig_workaround_hv(sc,
   10536 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10537 	}
   10538 
   10539 	/*
   10540 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10541 	 * aggressive resulting in many collisions. To avoid this, increase
   10542 	 * the IPG and reduce Rx latency in the PHY.
   10543 	 */
   10544 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
   10545 	    && link) {
   10546 		uint32_t tipg_reg;
   10547 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10548 		bool fdx;
   10549 		uint16_t emi_addr, emi_val;
   10550 
   10551 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10552 		tipg_reg &= ~TIPG_IPGT_MASK;
   10553 		fdx = status & STATUS_FD;
   10554 
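		/*
		 * IPGT 0xff stretches the transmit IPG as far as possible
		 * for 10Mb/s half-duplex; 0x08 restores the usual default.
		 */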
   10555 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10556 			tipg_reg |= 0xff;
   10557 			/* Reduce Rx latency in analog PHY */
   10558 			emi_val = 0;
   10559 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10560 		    fdx && speed != STATUS_SPEED_1000) {
   10561 			tipg_reg |= 0xc;
   10562 			emi_val = 1;
   10563 		} else {
   10564 			/* Roll back the default values */
   10565 			tipg_reg |= 0x08;
   10566 			emi_val = 1;
   10567 		}
   10568 
   10569 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10570 
   10571 		rv = sc->phy.acquire(sc);
   10572 		if (rv)
   10573 			return;
   10574 
   10575 		if (sc->sc_type == WM_T_PCH2)
   10576 			emi_addr = I82579_RX_CONFIG;
   10577 		else
   10578 			emi_addr = I217_RX_CONFIG;
   10579 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10580 
   10581 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10582 			uint16_t phy_reg;
   10583 
   10584 			sc->phy.readreg_locked(dev, 2,
   10585 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10586 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10587 			if (speed == STATUS_SPEED_100
   10588 			    || speed == STATUS_SPEED_10)
   10589 				phy_reg |= 0x3e8;
   10590 			else
   10591 				phy_reg |= 0xfa;
   10592 			sc->phy.writereg_locked(dev, 2,
   10593 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10594 
   10595 			if (speed == STATUS_SPEED_1000) {
   10596 				sc->phy.readreg_locked(dev, 2,
   10597 				    HV_PM_CTRL, &phy_reg);
   10598 
   10599 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10600 
   10601 				sc->phy.writereg_locked(dev, 2,
   10602 				    HV_PM_CTRL, phy_reg);
   10603 			}
   10604 		}
   10605 		sc->phy.release(sc);
   10606 
   10607 		if (rv)
   10608 			return;
   10609 
   10610 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10611 			uint16_t data, ptr_gap;
   10612 
   10613 			if (speed == STATUS_SPEED_1000) {
   10614 				rv = sc->phy.acquire(sc);
   10615 				if (rv)
   10616 					return;
   10617 
   10618 				rv = sc->phy.readreg_locked(dev, 2,
   10619 				    I82579_UNKNOWN1, &data);
   10620 				if (rv) {
   10621 					sc->phy.release(sc);
   10622 					return;
   10623 				}
   10624 
   10625 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10626 				if (ptr_gap < 0x18) {
   10627 					data &= ~(0x3ff << 2);
   10628 					data |= (0x18 << 2);
   10629 					rv = sc->phy.writereg_locked(dev,
   10630 					    2, I82579_UNKNOWN1, data);
   10631 				}
   10632 				sc->phy.release(sc);
   10633 				if (rv)
   10634 					return;
   10635 			} else {
   10636 				rv = sc->phy.acquire(sc);
   10637 				if (rv)
   10638 					return;
   10639 
   10640 				rv = sc->phy.writereg_locked(dev, 2,
   10641 				    I82579_UNKNOWN1, 0xc023);
   10642 				sc->phy.release(sc);
   10643 				if (rv)
   10644 					return;
   10645 
   10646 			}
   10647 		}
   10648 	}
   10649 
   10650 	/*
   10651 	 * I217 Packet Loss issue:
	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for the I217 to 8 usec.
   10655 	 */
   10656 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10657 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10658 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10659 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10660 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10661 	}
   10662 
   10663 	/* Work-around I218 hang issue */
   10664 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10665 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10666 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10667 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10668 		wm_k1_workaround_lpt_lp(sc, link);
   10669 
   10670 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10671 		/*
   10672 		 * Set platform power management values for Latency
   10673 		 * Tolerance Reporting (LTR)
   10674 		 */
   10675 		wm_platform_pm_pch_lpt(sc,
   10676 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10677 	}
   10678 
   10679 	/* Clear link partner's EEE ability */
   10680 	sc->eee_lp_ability = 0;
   10681 
   10682 	/* FEXTNVM6 K1-off workaround */
   10683 	if (sc->sc_type == WM_T_PCH_SPT) {
   10684 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10685 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10686 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10687 		else
   10688 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10689 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10690 	}
   10691 
   10692 	if (!link)
   10693 		return;
   10694 
   10695 	switch (sc->sc_type) {
   10696 	case WM_T_PCH2:
   10697 		wm_k1_workaround_lv(sc);
   10698 		/* FALLTHROUGH */
   10699 	case WM_T_PCH:
   10700 		if (sc->sc_phytype == WMPHY_82578)
   10701 			wm_link_stall_workaround_hv(sc);
   10702 		break;
   10703 	default:
   10704 		break;
   10705 	}
   10706 
   10707 	/* Enable/Disable EEE after link up */
   10708 	if (sc->sc_phytype > WMPHY_82579)
   10709 		wm_set_eee_pchlan(sc);
   10710 }
   10711 
   10712 /*
   10713  * wm_linkintr_tbi:
   10714  *
   10715  *	Helper; handle link interrupts for TBI mode.
   10716  */
   10717 static void
   10718 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10719 {
   10720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10721 	uint32_t status;
   10722 
   10723 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10724 		__func__));
   10725 
   10726 	status = CSR_READ(sc, WMREG_STATUS);
   10727 	if (icr & ICR_LSC) {
   10728 		wm_check_for_link(sc);
   10729 		if (status & STATUS_LU) {
   10730 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10731 				device_xname(sc->sc_dev),
   10732 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10733 			/*
   10734 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10735 			 * so we should update sc->sc_ctrl
   10736 			 */
   10737 
   10738 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10739 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10740 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10741 			if (status & STATUS_FD)
   10742 				sc->sc_tctl |=
   10743 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10744 			else
   10745 				sc->sc_tctl |=
   10746 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10747 			if (sc->sc_ctrl & CTRL_TFCE)
   10748 				sc->sc_fcrtl |= FCRTL_XONE;
   10749 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10750 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10751 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10752 			sc->sc_tbi_linkup = 1;
   10753 			if_link_state_change(ifp, LINK_STATE_UP);
   10754 		} else {
   10755 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10756 				device_xname(sc->sc_dev)));
   10757 			sc->sc_tbi_linkup = 0;
   10758 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10759 		}
   10760 		/* Update LED */
   10761 		wm_tbi_serdes_set_linkled(sc);
   10762 	} else if (icr & ICR_RXSEQ)
   10763 		DPRINTF(sc, WM_DEBUG_LINK,
   10764 		    ("%s: LINK: Receive sequence error\n",
   10765 			device_xname(sc->sc_dev)));
   10766 }
   10767 
   10768 /*
   10769  * wm_linkintr_serdes:
   10770  *
    10771  *	Helper; handle link interrupts for SERDES mode.
   10772  */
   10773 static void
   10774 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10775 {
   10776 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10777 	struct mii_data *mii = &sc->sc_mii;
   10778 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10779 	uint32_t pcs_adv, pcs_lpab, reg;
   10780 
   10781 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10782 		__func__));
   10783 
   10784 	if (icr & ICR_LSC) {
   10785 		/* Check PCS */
   10786 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10787 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10788 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10789 				device_xname(sc->sc_dev)));
   10790 			mii->mii_media_status |= IFM_ACTIVE;
   10791 			sc->sc_tbi_linkup = 1;
   10792 			if_link_state_change(ifp, LINK_STATE_UP);
   10793 		} else {
   10794 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10795 				device_xname(sc->sc_dev)));
   10796 			mii->mii_media_status |= IFM_NONE;
   10797 			sc->sc_tbi_linkup = 0;
   10798 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10799 			wm_tbi_serdes_set_linkled(sc);
   10800 			return;
   10801 		}
   10802 		mii->mii_media_active |= IFM_1000_SX;
   10803 		if ((reg & PCS_LSTS_FDX) != 0)
   10804 			mii->mii_media_active |= IFM_FDX;
   10805 		else
   10806 			mii->mii_media_active |= IFM_HDX;
   10807 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10808 			/* Check flow */
   10809 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10810 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10811 				DPRINTF(sc, WM_DEBUG_LINK,
   10812 				    ("XXX LINKOK but not ACOMP\n"));
   10813 				return;
   10814 			}
   10815 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10816 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10817 			DPRINTF(sc, WM_DEBUG_LINK,
   10818 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10819 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10820 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10821 				mii->mii_media_active |= IFM_FLOW
   10822 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10823 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10824 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10825 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10826 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10827 				mii->mii_media_active |= IFM_FLOW
   10828 				    | IFM_ETH_TXPAUSE;
   10829 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10830 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10831 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10832 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10833 				mii->mii_media_active |= IFM_FLOW
   10834 				    | IFM_ETH_RXPAUSE;
   10835 		}
   10836 		/* Update LED */
   10837 		wm_tbi_serdes_set_linkled(sc);
   10838 	} else
   10839 		DPRINTF(sc, WM_DEBUG_LINK,
   10840 		    ("%s: LINK: Receive sequence error\n",
   10841 		    device_xname(sc->sc_dev)));
   10842 }
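
          /*
           * The pause resolution above follows the IEEE 802.3 Annex 28B
           * rules (a summary; SYM and ASYM are the TXCW_SYM_PAUSE and
           * TXCW_ASYM_PAUSE bits read from PCS_ANADV/PCS_LPAB):
           *
           *	Local		Link partner	Result
           *	SYM		SYM		Tx and Rx pause (IFM_FLOW)
           *	!SYM + ASYM	SYM + ASYM	Tx pause only
           *	SYM + ASYM	!SYM + ASYM	Rx pause only
           *	otherwise			no flow control
           */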
   10843 
   10844 /*
   10845  * wm_linkintr:
   10846  *
   10847  *	Helper; handle link interrupts.
   10848  */
   10849 static void
   10850 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10851 {
   10852 
   10853 	KASSERT(mutex_owned(sc->sc_core_lock));
   10854 
   10855 	if (sc->sc_flags & WM_F_HAS_MII)
   10856 		wm_linkintr_gmii(sc, icr);
   10857 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10858 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10859 		wm_linkintr_serdes(sc, icr);
   10860 	else
   10861 		wm_linkintr_tbi(sc, icr);
   10862 }
   10863 
   10864 
   10865 static inline void
   10866 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10867 {
   10868 
   10869 	if (wmq->wmq_txrx_use_workqueue) {
   10870 		if (!wmq->wmq_wq_enqueued) {
   10871 			wmq->wmq_wq_enqueued = true;
   10872 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10873 			    curcpu());
   10874 		}
   10875 	} else
   10876 		softint_schedule(wmq->wmq_si);
   10877 }
   10878 
   10879 static inline void
   10880 wm_legacy_intr_disable(struct wm_softc *sc)
   10881 {
   10882 
   10883 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10884 }
   10885 
   10886 static inline void
   10887 wm_legacy_intr_enable(struct wm_softc *sc)
   10888 {
   10889 
   10890 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10891 }
   10892 
   10893 /*
   10894  * wm_intr_legacy:
   10895  *
   10896  *	Interrupt service routine for INTx and MSI.
   10897  */
   10898 static int
   10899 wm_intr_legacy(void *arg)
   10900 {
   10901 	struct wm_softc *sc = arg;
   10902 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10903 	struct wm_queue *wmq = &sc->sc_queue[0];
   10904 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10905 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10906 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10907 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10908 	uint32_t icr, rndval = 0;
   10909 	bool more = false;
   10910 
   10911 	icr = CSR_READ(sc, WMREG_ICR);
   10912 	if ((icr & sc->sc_icr) == 0)
   10913 		return 0;
   10914 
   10915 	DPRINTF(sc, WM_DEBUG_TX,
    10916 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10917 	if (rndval == 0)
   10918 		rndval = icr;
   10919 
   10920 	mutex_enter(txq->txq_lock);
   10921 
   10922 	if (txq->txq_stopping) {
   10923 		mutex_exit(txq->txq_lock);
   10924 		return 1;
   10925 	}
   10926 
   10927 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10928 	if (icr & ICR_TXDW) {
   10929 		DPRINTF(sc, WM_DEBUG_TX,
   10930 		    ("%s: TX: got TXDW interrupt\n",
   10931 			device_xname(sc->sc_dev)));
   10932 		WM_Q_EVCNT_INCR(txq, txdw);
   10933 	}
   10934 #endif
   10935 	if (txlimit > 0) {
   10936 		more |= wm_txeof(txq, txlimit);
   10937 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10938 			more = true;
   10939 	} else
   10940 		more = true;
   10941 	mutex_exit(txq->txq_lock);
   10942 
   10943 	mutex_enter(rxq->rxq_lock);
   10944 
   10945 	if (rxq->rxq_stopping) {
   10946 		mutex_exit(rxq->rxq_lock);
   10947 		return 1;
   10948 	}
   10949 
   10950 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10951 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10952 		DPRINTF(sc, WM_DEBUG_RX,
   10953 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10954 			device_xname(sc->sc_dev),
   10955 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10956 		WM_Q_EVCNT_INCR(rxq, intr);
   10957 	}
   10958 #endif
   10959 	if (rxlimit > 0) {
   10960 		/*
   10961 		 * wm_rxeof() does *not* call upper layer functions directly,
    10962 		 * as if_percpuq_enqueue() just calls softint_schedule().
   10963 		 * So, we can call wm_rxeof() in interrupt context.
   10964 		 */
   10965 		more = wm_rxeof(rxq, rxlimit);
   10966 	} else
   10967 		more = true;
   10968 
   10969 	mutex_exit(rxq->rxq_lock);
   10970 
   10971 	mutex_enter(sc->sc_core_lock);
   10972 
   10973 	if (sc->sc_core_stopping) {
   10974 		mutex_exit(sc->sc_core_lock);
   10975 		return 1;
   10976 	}
   10977 
   10978 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10979 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10980 		wm_linkintr(sc, icr);
   10981 	}
   10982 	if ((icr & ICR_GPI(0)) != 0)
   10983 		device_printf(sc->sc_dev, "got module interrupt\n");
   10984 
   10985 	mutex_exit(sc->sc_core_lock);
   10986 
   10987 	if (icr & ICR_RXO) {
   10988 #if defined(WM_DEBUG)
   10989 		log(LOG_WARNING, "%s: Receive overrun\n",
   10990 		    device_xname(sc->sc_dev));
   10991 #endif /* defined(WM_DEBUG) */
   10992 	}
   10993 
   10994 	rnd_add_uint32(&sc->rnd_source, rndval);
   10995 
   10996 	if (more) {
   10997 		/* Try to get more packets going. */
   10998 		wm_legacy_intr_disable(sc);
   10999 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11000 		wm_sched_handle_queue(sc, wmq);
   11001 	}
   11002 
   11003 	return 1;
   11004 }
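
          /*
           * Note: ICR is read-to-clear on these MACs, so the handler above
           * reads it exactly once and dispatches Tx, Rx and link handling
           * from the saved copy instead of re-reading the register.
           */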
   11005 
   11006 static inline void
   11007 wm_txrxintr_disable(struct wm_queue *wmq)
   11008 {
   11009 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   11010 
   11011 	if (__predict_false(!wm_is_using_msix(sc))) {
   11012 		wm_legacy_intr_disable(sc);
   11013 		return;
   11014 	}
   11015 
   11016 	if (sc->sc_type == WM_T_82574)
   11017 		CSR_WRITE(sc, WMREG_IMC,
   11018 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   11019 	else if (sc->sc_type == WM_T_82575)
   11020 		CSR_WRITE(sc, WMREG_EIMC,
   11021 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   11022 	else
   11023 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   11024 }
   11025 
   11026 static inline void
   11027 wm_txrxintr_enable(struct wm_queue *wmq)
   11028 {
   11029 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   11030 
   11031 	wm_itrs_calculate(sc, wmq);
   11032 
   11033 	if (__predict_false(!wm_is_using_msix(sc))) {
   11034 		wm_legacy_intr_enable(sc);
   11035 		return;
   11036 	}
   11037 
   11038 	/*
    11039 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    11040 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
    11041 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    11042 	 * while its wm_handle_queue(wmq) is running.
   11043 	 */
   11044 	if (sc->sc_type == WM_T_82574)
   11045 		CSR_WRITE(sc, WMREG_IMS,
   11046 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   11047 	else if (sc->sc_type == WM_T_82575)
   11048 		CSR_WRITE(sc, WMREG_EIMS,
   11049 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   11050 	else
   11051 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   11052 }
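
          /*
           * Summary of the per-queue interrupt mask registers used by the
           * two helpers above:
           *
           *	82574:		IMS/IMC with ICR_TXQ()/ICR_RXQ() bits per queue
           *	82575:		EIMS/EIMC with EITR_TX_QUEUE()/EITR_RX_QUEUE()
           *	others (MSI-X):	EIMS/EIMC with one bit per MSI-X vector index
           */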
   11053 
   11054 static int
   11055 wm_txrxintr_msix(void *arg)
   11056 {
   11057 	struct wm_queue *wmq = arg;
   11058 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11059 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11060 	struct wm_softc *sc = txq->txq_sc;
   11061 	u_int txlimit = sc->sc_tx_intr_process_limit;
   11062 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   11063 	bool txmore;
   11064 	bool rxmore;
   11065 
   11066 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   11067 
   11068 	DPRINTF(sc, WM_DEBUG_TX,
   11069 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   11070 
   11071 	wm_txrxintr_disable(wmq);
   11072 
   11073 	mutex_enter(txq->txq_lock);
   11074 
   11075 	if (txq->txq_stopping) {
   11076 		mutex_exit(txq->txq_lock);
   11077 		return 1;
   11078 	}
   11079 
   11080 	WM_Q_EVCNT_INCR(txq, txdw);
   11081 	if (txlimit > 0) {
   11082 		txmore = wm_txeof(txq, txlimit);
    11083 		/* wm_deferred_start() is done in wm_handle_queue(). */
   11084 	} else
   11085 		txmore = true;
   11086 	mutex_exit(txq->txq_lock);
   11087 
   11088 	DPRINTF(sc, WM_DEBUG_RX,
   11089 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   11090 	mutex_enter(rxq->rxq_lock);
   11091 
   11092 	if (rxq->rxq_stopping) {
   11093 		mutex_exit(rxq->rxq_lock);
   11094 		return 1;
   11095 	}
   11096 
   11097 	WM_Q_EVCNT_INCR(rxq, intr);
   11098 	if (rxlimit > 0) {
   11099 		rxmore = wm_rxeof(rxq, rxlimit);
   11100 	} else
   11101 		rxmore = true;
   11102 	mutex_exit(rxq->rxq_lock);
   11103 
   11104 	wm_itrs_writereg(sc, wmq);
   11105 
   11106 	if (txmore || rxmore) {
   11107 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11108 		wm_sched_handle_queue(sc, wmq);
   11109 	} else
   11110 		wm_txrxintr_enable(wmq);
   11111 
   11112 	return 1;
   11113 }
   11114 
   11115 static void
   11116 wm_handle_queue(void *arg)
   11117 {
   11118 	struct wm_queue *wmq = arg;
   11119 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11120 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11121 	struct wm_softc *sc = txq->txq_sc;
   11122 	u_int txlimit = sc->sc_tx_process_limit;
   11123 	u_int rxlimit = sc->sc_rx_process_limit;
   11124 	bool txmore;
   11125 	bool rxmore;
   11126 
   11127 	mutex_enter(txq->txq_lock);
   11128 	if (txq->txq_stopping) {
   11129 		mutex_exit(txq->txq_lock);
   11130 		return;
   11131 	}
   11132 	txmore = wm_txeof(txq, txlimit);
   11133 	wm_deferred_start_locked(txq);
   11134 	mutex_exit(txq->txq_lock);
   11135 
   11136 	mutex_enter(rxq->rxq_lock);
   11137 	if (rxq->rxq_stopping) {
   11138 		mutex_exit(rxq->rxq_lock);
   11139 		return;
   11140 	}
   11141 	WM_Q_EVCNT_INCR(rxq, defer);
   11142 	rxmore = wm_rxeof(rxq, rxlimit);
   11143 	mutex_exit(rxq->rxq_lock);
   11144 
   11145 	if (txmore || rxmore) {
   11146 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11147 		wm_sched_handle_queue(sc, wmq);
   11148 	} else
   11149 		wm_txrxintr_enable(wmq);
   11150 }
   11151 
   11152 static void
   11153 wm_handle_queue_work(struct work *wk, void *context)
   11154 {
   11155 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   11156 
   11157 	/*
    11158 	 * Workaround for some qemu environments, which don't stop
    11159 	 * interrupts immediately.
   11160 	 */
   11161 	wmq->wmq_wq_enqueued = false;
   11162 	wm_handle_queue(wmq);
   11163 }
   11164 
   11165 /*
   11166  * wm_linkintr_msix:
   11167  *
   11168  *	Interrupt service routine for link status change for MSI-X.
   11169  */
   11170 static int
   11171 wm_linkintr_msix(void *arg)
   11172 {
   11173 	struct wm_softc *sc = arg;
   11174 	uint32_t reg;
   11175 	bool has_rxo;
   11176 
   11177 	reg = CSR_READ(sc, WMREG_ICR);
   11178 	mutex_enter(sc->sc_core_lock);
   11179 	DPRINTF(sc, WM_DEBUG_LINK,
   11180 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11181 		device_xname(sc->sc_dev), reg));
   11182 
   11183 	if (sc->sc_core_stopping)
   11184 		goto out;
   11185 
   11186 	if ((reg & ICR_LSC) != 0) {
   11187 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11188 		wm_linkintr(sc, ICR_LSC);
   11189 	}
   11190 	if ((reg & ICR_GPI(0)) != 0)
   11191 		device_printf(sc->sc_dev, "got module interrupt\n");
   11192 
   11193 	/*
   11194 	 * XXX 82574 MSI-X mode workaround
   11195 	 *
    11196 	 * 82574 MSI-X mode causes a receive overrun (RXO) interrupt as an
    11197 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    11198 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    11199 	 * interrupts by writing WMREG_ICS to process received packets.
   11200 	 */
   11201 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11202 #if defined(WM_DEBUG)
   11203 		log(LOG_WARNING, "%s: Receive overrun\n",
   11204 		    device_xname(sc->sc_dev));
   11205 #endif /* defined(WM_DEBUG) */
   11206 
   11207 		has_rxo = true;
   11208 		/*
    11209 		 * The RXO interrupt fires at a very high rate when receive
    11210 		 * traffic is heavy. We use polling mode for ICR_OTHER like Tx/Rx
   11211 		 * interrupts. ICR_OTHER will be enabled at the end of
   11212 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   11213 		 * ICR_RXQ(1) interrupts.
   11214 		 */
   11215 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11216 
   11217 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11218 	}
    11219 
   11222 out:
   11223 	mutex_exit(sc->sc_core_lock);
   11224 
   11225 	if (sc->sc_type == WM_T_82574) {
   11226 		if (!has_rxo)
   11227 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11228 		else
   11229 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11230 	} else if (sc->sc_type == WM_T_82575)
   11231 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11232 	else
   11233 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11234 
   11235 	return 1;
   11236 }
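
          /*
           * The 82574 RXO workaround above, as a timeline (derived from
           * the comments in wm_linkintr_msix() and wm_txrxintr_enable()):
           *
           *	RXO -> ICR_OTHER vector fires -> mask ICR_OTHER via IMC
           *	    -> force ICR_RXQ(0)|ICR_RXQ(1) via ICS
           *	    -> wm_txrxintr_msix() drains both queues
           *	    -> wm_txrxintr_enable() sets ICR_OTHER in IMS again
           */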
   11237 
   11238 /*
   11239  * Media related.
   11240  * GMII, SGMII, TBI (and SERDES)
   11241  */
   11242 
   11243 /* Common */
   11244 
   11245 /*
   11246  * wm_tbi_serdes_set_linkled:
   11247  *
   11248  *	Update the link LED on TBI and SERDES devices.
   11249  */
   11250 static void
   11251 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11252 {
   11253 
   11254 	if (sc->sc_tbi_linkup)
   11255 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11256 	else
   11257 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11258 
   11259 	/* 82540 or newer devices are active low */
   11260 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11261 
   11262 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11263 }
   11264 
   11265 /* GMII related */
   11266 
   11267 /*
   11268  * wm_gmii_reset:
   11269  *
   11270  *	Reset the PHY.
   11271  */
   11272 static void
   11273 wm_gmii_reset(struct wm_softc *sc)
   11274 {
   11275 	uint32_t reg;
   11276 	int rv;
   11277 
   11278 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11279 		device_xname(sc->sc_dev), __func__));
   11280 
   11281 	rv = sc->phy.acquire(sc);
   11282 	if (rv != 0) {
   11283 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11284 		    __func__);
   11285 		return;
   11286 	}
   11287 
   11288 	switch (sc->sc_type) {
   11289 	case WM_T_82542_2_0:
   11290 	case WM_T_82542_2_1:
   11291 		/* null */
   11292 		break;
   11293 	case WM_T_82543:
   11294 		/*
    11295 		 * With the 82543, we need to force the MAC's speed and duplex
    11296 		 * to match the PHY's speed and duplex configuration.
   11297 		 * In addition, we need to perform a hardware reset on the PHY
   11298 		 * to take it out of reset.
   11299 		 */
   11300 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11301 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11302 
   11303 		/* The PHY reset pin is active-low. */
   11304 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11305 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11306 		    CTRL_EXT_SWDPIN(4));
   11307 		reg |= CTRL_EXT_SWDPIO(4);
   11308 
   11309 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11310 		CSR_WRITE_FLUSH(sc);
   11311 		delay(10*1000);
   11312 
   11313 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11314 		CSR_WRITE_FLUSH(sc);
   11315 		delay(150);
   11316 #if 0
   11317 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11318 #endif
   11319 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11320 		break;
   11321 	case WM_T_82544:	/* Reset 10000us */
   11322 	case WM_T_82540:
   11323 	case WM_T_82545:
   11324 	case WM_T_82545_3:
   11325 	case WM_T_82546:
   11326 	case WM_T_82546_3:
   11327 	case WM_T_82541:
   11328 	case WM_T_82541_2:
   11329 	case WM_T_82547:
   11330 	case WM_T_82547_2:
   11331 	case WM_T_82571:	/* Reset 100us */
   11332 	case WM_T_82572:
   11333 	case WM_T_82573:
   11334 	case WM_T_82574:
   11335 	case WM_T_82575:
   11336 	case WM_T_82576:
   11337 	case WM_T_82580:
   11338 	case WM_T_I350:
   11339 	case WM_T_I354:
   11340 	case WM_T_I210:
   11341 	case WM_T_I211:
   11342 	case WM_T_82583:
   11343 	case WM_T_80003:
   11344 		/* Generic reset */
   11345 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11346 		CSR_WRITE_FLUSH(sc);
   11347 		delay(20000);
   11348 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11349 		CSR_WRITE_FLUSH(sc);
   11350 		delay(20000);
   11351 
   11352 		if ((sc->sc_type == WM_T_82541)
   11353 		    || (sc->sc_type == WM_T_82541_2)
   11354 		    || (sc->sc_type == WM_T_82547)
   11355 		    || (sc->sc_type == WM_T_82547_2)) {
    11356 			/* Workarounds for IGP are done in igp_reset() */
   11357 			/* XXX add code to set LED after phy reset */
   11358 		}
   11359 		break;
   11360 	case WM_T_ICH8:
   11361 	case WM_T_ICH9:
   11362 	case WM_T_ICH10:
   11363 	case WM_T_PCH:
   11364 	case WM_T_PCH2:
   11365 	case WM_T_PCH_LPT:
   11366 	case WM_T_PCH_SPT:
   11367 	case WM_T_PCH_CNP:
   11368 	case WM_T_PCH_TGP:
   11369 		/* Generic reset */
   11370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11371 		CSR_WRITE_FLUSH(sc);
   11372 		delay(100);
   11373 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11374 		CSR_WRITE_FLUSH(sc);
   11375 		delay(150);
   11376 		break;
   11377 	default:
   11378 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11379 		    __func__);
   11380 		break;
   11381 	}
   11382 
   11383 	sc->phy.release(sc);
   11384 
   11385 	/* get_cfg_done */
   11386 	wm_get_cfg_done(sc);
   11387 
   11388 	/* Extra setup */
   11389 	switch (sc->sc_type) {
   11390 	case WM_T_82542_2_0:
   11391 	case WM_T_82542_2_1:
   11392 	case WM_T_82543:
   11393 	case WM_T_82544:
   11394 	case WM_T_82540:
   11395 	case WM_T_82545:
   11396 	case WM_T_82545_3:
   11397 	case WM_T_82546:
   11398 	case WM_T_82546_3:
   11399 	case WM_T_82541_2:
   11400 	case WM_T_82547_2:
   11401 	case WM_T_82571:
   11402 	case WM_T_82572:
   11403 	case WM_T_82573:
   11404 	case WM_T_82574:
   11405 	case WM_T_82583:
   11406 	case WM_T_82575:
   11407 	case WM_T_82576:
   11408 	case WM_T_82580:
   11409 	case WM_T_I350:
   11410 	case WM_T_I354:
   11411 	case WM_T_I210:
   11412 	case WM_T_I211:
   11413 	case WM_T_80003:
   11414 		/* Null */
   11415 		break;
   11416 	case WM_T_82541:
   11417 	case WM_T_82547:
    11418 		/* XXX Actively configure the LED after PHY reset */
   11419 		break;
   11420 	case WM_T_ICH8:
   11421 	case WM_T_ICH9:
   11422 	case WM_T_ICH10:
   11423 	case WM_T_PCH:
   11424 	case WM_T_PCH2:
   11425 	case WM_T_PCH_LPT:
   11426 	case WM_T_PCH_SPT:
   11427 	case WM_T_PCH_CNP:
   11428 	case WM_T_PCH_TGP:
   11429 		wm_phy_post_reset(sc);
   11430 		break;
   11431 	default:
   11432 		panic("%s: unknown type\n", __func__);
   11433 		break;
   11434 	}
   11435 }
   11436 
   11437 /*
   11438  * Set up sc_phytype and mii_{read|write}reg.
   11439  *
    11440  *  To identify the PHY type, the correct read/write functions must be
    11441  * selected, and selecting them requires the PCI ID or the MAC type,
    11442  * since the PHY registers cannot be accessed yet.
    11443  *
    11444  *  On the first call of this function, the PHY ID is not known yet, so
    11445  * the PCI ID or the MAC type is checked. The list of PCI IDs may not be
    11446  * complete, so the result might be incorrect.
    11447  *
    11448  *  On the second call, the PHY OUI and model are used to identify the
    11449  * PHY type. This may still be imperfect because of missing table
    11450  * entries, but it is more reliable than the first call.
    11451  *
    11452  *  If the newly detected result differs from the previous assumption,
    11453  * a diagnostic message is printed.
   11454  */
   11455 static void
   11456 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11457     uint16_t phy_model)
   11458 {
   11459 	device_t dev = sc->sc_dev;
   11460 	struct mii_data *mii = &sc->sc_mii;
   11461 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11462 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11463 	mii_readreg_t new_readreg;
   11464 	mii_writereg_t new_writereg;
   11465 	bool dodiag = true;
   11466 
   11467 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11468 		device_xname(sc->sc_dev), __func__));
   11469 
   11470 	/*
    11471 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
    11472 	 * always incorrect; don't print diag output on the second call.
   11473 	 */
   11474 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11475 		dodiag = false;
   11476 
   11477 	if (mii->mii_readreg == NULL) {
   11478 		/*
   11479 		 *  This is the first call of this function. For ICH and PCH
   11480 		 * variants, it's difficult to determine the PHY access method
   11481 		 * by sc_type, so use the PCI product ID for some devices.
   11482 		 */
   11483 
   11484 		switch (sc->sc_pcidevid) {
   11485 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11486 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11487 			/* 82577 */
   11488 			new_phytype = WMPHY_82577;
   11489 			break;
   11490 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11491 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11492 			/* 82578 */
   11493 			new_phytype = WMPHY_82578;
   11494 			break;
   11495 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11496 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11497 			/* 82579 */
   11498 			new_phytype = WMPHY_82579;
   11499 			break;
   11500 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11501 		case PCI_PRODUCT_INTEL_82801I_BM:
   11502 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11503 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11504 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11505 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11506 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11507 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11508 			/* ICH8, 9, 10 with 82567 */
   11509 			new_phytype = WMPHY_BM;
   11510 			break;
   11511 		default:
   11512 			break;
   11513 		}
   11514 	} else {
   11515 		/* It's not the first call. Use PHY OUI and model */
   11516 		switch (phy_oui) {
   11517 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11518 			switch (phy_model) {
   11519 			case MII_MODEL_ATTANSIC_AR8021:
   11520 				new_phytype = WMPHY_82578;
   11521 				break;
   11522 			default:
   11523 				break;
   11524 			}
   11525 			break;
   11526 		case MII_OUI_xxMARVELL:
   11527 			switch (phy_model) {
   11528 			case MII_MODEL_xxMARVELL_I210:
   11529 				new_phytype = WMPHY_I210;
   11530 				break;
   11531 			case MII_MODEL_xxMARVELL_E1011:
   11532 			case MII_MODEL_xxMARVELL_E1000_3:
   11533 			case MII_MODEL_xxMARVELL_E1000_5:
   11534 			case MII_MODEL_xxMARVELL_E1112:
   11535 				new_phytype = WMPHY_M88;
   11536 				break;
   11537 			case MII_MODEL_xxMARVELL_E1149:
   11538 				new_phytype = WMPHY_BM;
   11539 				break;
   11540 			case MII_MODEL_xxMARVELL_E1111:
   11541 			case MII_MODEL_xxMARVELL_I347:
   11542 			case MII_MODEL_xxMARVELL_E1512:
   11543 			case MII_MODEL_xxMARVELL_E1340M:
   11544 			case MII_MODEL_xxMARVELL_E1543:
   11545 				new_phytype = WMPHY_M88;
   11546 				break;
   11547 			case MII_MODEL_xxMARVELL_I82563:
   11548 				new_phytype = WMPHY_GG82563;
   11549 				break;
   11550 			default:
   11551 				break;
   11552 			}
   11553 			break;
   11554 		case MII_OUI_INTEL:
   11555 			switch (phy_model) {
   11556 			case MII_MODEL_INTEL_I82577:
   11557 				new_phytype = WMPHY_82577;
   11558 				break;
   11559 			case MII_MODEL_INTEL_I82579:
   11560 				new_phytype = WMPHY_82579;
   11561 				break;
   11562 			case MII_MODEL_INTEL_I217:
   11563 				new_phytype = WMPHY_I217;
   11564 				break;
   11565 			case MII_MODEL_INTEL_I82580:
   11566 				new_phytype = WMPHY_82580;
   11567 				break;
   11568 			case MII_MODEL_INTEL_I350:
   11569 				new_phytype = WMPHY_I350;
   11570 				break;
   11571 			default:
   11572 				break;
   11573 			}
   11574 			break;
   11575 		case MII_OUI_yyINTEL:
   11576 			switch (phy_model) {
   11577 			case MII_MODEL_yyINTEL_I82562G:
   11578 			case MII_MODEL_yyINTEL_I82562EM:
   11579 			case MII_MODEL_yyINTEL_I82562ET:
   11580 				new_phytype = WMPHY_IFE;
   11581 				break;
   11582 			case MII_MODEL_yyINTEL_IGP01E1000:
   11583 				new_phytype = WMPHY_IGP;
   11584 				break;
   11585 			case MII_MODEL_yyINTEL_I82566:
   11586 				new_phytype = WMPHY_IGP_3;
   11587 				break;
   11588 			default:
   11589 				break;
   11590 			}
   11591 			break;
   11592 		default:
   11593 			break;
   11594 		}
   11595 
   11596 		if (dodiag) {
   11597 			if (new_phytype == WMPHY_UNKNOWN)
   11598 				aprint_verbose_dev(dev,
   11599 				    "%s: Unknown PHY model. OUI=%06x, "
   11600 				    "model=%04x\n", __func__, phy_oui,
   11601 				    phy_model);
   11602 
   11603 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11604 			    && (sc->sc_phytype != new_phytype)) {
   11605 				aprint_error_dev(dev, "Previously assumed PHY "
    11606 				    "type(%u) was incorrect. PHY type from "
    11607 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   11608 			}
   11609 		}
   11610 	}
   11611 
   11612 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11613 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11614 		/* SGMII */
   11615 		new_readreg = wm_sgmii_readreg;
   11616 		new_writereg = wm_sgmii_writereg;
    11617 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11618 		/* BM2 (phyaddr == 1) */
   11619 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11620 		    && (new_phytype != WMPHY_BM)
   11621 		    && (new_phytype != WMPHY_UNKNOWN))
   11622 			doubt_phytype = new_phytype;
   11623 		new_phytype = WMPHY_BM;
   11624 		new_readreg = wm_gmii_bm_readreg;
   11625 		new_writereg = wm_gmii_bm_writereg;
   11626 	} else if (sc->sc_type >= WM_T_PCH) {
   11627 		/* All PCH* use _hv_ */
   11628 		new_readreg = wm_gmii_hv_readreg;
   11629 		new_writereg = wm_gmii_hv_writereg;
   11630 	} else if (sc->sc_type >= WM_T_ICH8) {
   11631 		/* non-82567 ICH8, 9 and 10 */
   11632 		new_readreg = wm_gmii_i82544_readreg;
   11633 		new_writereg = wm_gmii_i82544_writereg;
   11634 	} else if (sc->sc_type >= WM_T_80003) {
   11635 		/* 80003 */
   11636 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11637 		    && (new_phytype != WMPHY_GG82563)
   11638 		    && (new_phytype != WMPHY_UNKNOWN))
   11639 			doubt_phytype = new_phytype;
   11640 		new_phytype = WMPHY_GG82563;
   11641 		new_readreg = wm_gmii_i80003_readreg;
   11642 		new_writereg = wm_gmii_i80003_writereg;
   11643 	} else if (sc->sc_type >= WM_T_I210) {
   11644 		/* I210 and I211 */
   11645 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11646 		    && (new_phytype != WMPHY_I210)
   11647 		    && (new_phytype != WMPHY_UNKNOWN))
   11648 			doubt_phytype = new_phytype;
   11649 		new_phytype = WMPHY_I210;
   11650 		new_readreg = wm_gmii_gs40g_readreg;
   11651 		new_writereg = wm_gmii_gs40g_writereg;
   11652 	} else if (sc->sc_type >= WM_T_82580) {
   11653 		/* 82580, I350 and I354 */
   11654 		new_readreg = wm_gmii_82580_readreg;
   11655 		new_writereg = wm_gmii_82580_writereg;
   11656 	} else if (sc->sc_type >= WM_T_82544) {
    11657 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11658 		new_readreg = wm_gmii_i82544_readreg;
   11659 		new_writereg = wm_gmii_i82544_writereg;
   11660 	} else {
   11661 		new_readreg = wm_gmii_i82543_readreg;
   11662 		new_writereg = wm_gmii_i82543_writereg;
   11663 	}
   11664 
   11665 	if (new_phytype == WMPHY_BM) {
   11666 		/* All BM use _bm_ */
   11667 		new_readreg = wm_gmii_bm_readreg;
   11668 		new_writereg = wm_gmii_bm_writereg;
   11669 	}
   11670 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
   11671 		/* All PCH* use _hv_ */
   11672 		new_readreg = wm_gmii_hv_readreg;
   11673 		new_writereg = wm_gmii_hv_writereg;
   11674 	}
   11675 
   11676 	/* Diag output */
   11677 	if (dodiag) {
   11678 		if (doubt_phytype != WMPHY_UNKNOWN)
   11679 			aprint_error_dev(dev, "Assumed new PHY type was "
   11680 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11681 			    new_phytype);
   11682 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11683 		    && (sc->sc_phytype != new_phytype))
    11684 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   11685 			    "was incorrect. New PHY type = %u\n",
   11686 			    sc->sc_phytype, new_phytype);
   11687 
   11688 		if ((mii->mii_readreg != NULL) &&
   11689 		    (new_phytype == WMPHY_UNKNOWN))
   11690 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11691 
   11692 		if ((mii->mii_readreg != NULL) &&
   11693 		    (mii->mii_readreg != new_readreg))
   11694 			aprint_error_dev(dev, "Previously assumed PHY "
   11695 			    "read/write function was incorrect.\n");
   11696 	}
   11697 
   11698 	/* Update now */
   11699 	sc->sc_phytype = new_phytype;
   11700 	mii->mii_readreg = new_readreg;
   11701 	mii->mii_writereg = new_writereg;
   11702 	if (new_readreg == wm_gmii_hv_readreg) {
   11703 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11704 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11705 	} else if (new_readreg == wm_sgmii_readreg) {
   11706 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11707 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11708 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11709 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11710 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11711 	}
   11712 }
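
          /*
           * Typical call sequence, as a sketch (the first call site is
           * outside this excerpt):
           *
           *	wm_gmii_setup_phytype(sc, 0, 0);	(before the PHY probe)
           *	mii_attach(...);			(probe the PHY)
           *	wm_gmii_setup_phytype(sc,
           *	    child->mii_mpd_oui,
           *	    child->mii_mpd_model);		(refine using PHY ID)
           *
           * The second call is visible in wm_gmii_mediainit() below.
           */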
   11713 
   11714 /*
   11715  * wm_get_phy_id_82575:
   11716  *
    11717  * Return the PHY ID, or -1 if it failed.
   11718  */
   11719 static int
   11720 wm_get_phy_id_82575(struct wm_softc *sc)
   11721 {
   11722 	uint32_t reg;
   11723 	int phyid = -1;
   11724 
   11725 	/* XXX */
   11726 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11727 		return -1;
   11728 
   11729 	if (wm_sgmii_uses_mdio(sc)) {
   11730 		switch (sc->sc_type) {
   11731 		case WM_T_82575:
   11732 		case WM_T_82576:
   11733 			reg = CSR_READ(sc, WMREG_MDIC);
   11734 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11735 			break;
   11736 		case WM_T_82580:
   11737 		case WM_T_I350:
   11738 		case WM_T_I354:
   11739 		case WM_T_I210:
   11740 		case WM_T_I211:
   11741 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11742 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11743 			break;
   11744 		default:
   11745 			return -1;
   11746 		}
   11747 	}
   11748 
   11749 	return phyid;
   11750 }
   11751 
   11752 /*
   11753  * wm_gmii_mediainit:
   11754  *
   11755  *	Initialize media for use on 1000BASE-T devices.
   11756  */
   11757 static void
   11758 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11759 {
   11760 	device_t dev = sc->sc_dev;
   11761 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11762 	struct mii_data *mii = &sc->sc_mii;
   11763 
   11764 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11765 		device_xname(sc->sc_dev), __func__));
   11766 
   11767 	/* We have GMII. */
   11768 	sc->sc_flags |= WM_F_HAS_MII;
   11769 
   11770 	if (sc->sc_type == WM_T_80003)
    11771 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11772 	else
   11773 		sc->sc_tipg = TIPG_1000T_DFLT;
   11774 
   11775 	/*
   11776 	 * Let the chip set speed/duplex on its own based on
   11777 	 * signals from the PHY.
   11778 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11779 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11780 	 */
   11781 	sc->sc_ctrl |= CTRL_SLU;
   11782 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11783 
   11784 	/* Initialize our media structures and probe the GMII. */
   11785 	mii->mii_ifp = ifp;
   11786 
   11787 	mii->mii_statchg = wm_gmii_statchg;
   11788 
   11789 	/* get PHY control from SMBus to PCIe */
   11790 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11791 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11792 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
   11793 		wm_init_phy_workarounds_pchlan(sc);
   11794 
   11795 	wm_gmii_reset(sc);
   11796 
   11797 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11798 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11799 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11800 
   11801 	/* Setup internal SGMII PHY for SFP */
   11802 	wm_sgmii_sfp_preconfig(sc);
   11803 
   11804 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11805 	    || (sc->sc_type == WM_T_82580)
   11806 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11807 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11808 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11809 			/* Attach only one port */
   11810 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11811 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11812 		} else {
   11813 			int i, id;
   11814 			uint32_t ctrl_ext;
   11815 
   11816 			id = wm_get_phy_id_82575(sc);
   11817 			if (id != -1) {
   11818 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11819 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11820 			}
   11821 			if ((id == -1)
   11822 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11823 				/* Power on sgmii phy if it is disabled */
   11824 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11825 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    11826 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   11827 				CSR_WRITE_FLUSH(sc);
   11828 				delay(300*1000); /* XXX too long */
   11829 
   11830 				/*
    11831 				 * Scan PHY addresses from 1 to 7.
    11832 				 *
    11833 				 * I2C access fails with the I2C register's
    11834 				 * ERROR bit set, so suppress error messages
    11835 				 * while scanning.
   11836 				 */
   11837 				sc->phy.no_errprint = true;
   11838 				for (i = 1; i < 8; i++)
   11839 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11840 					    0xffffffff, i, MII_OFFSET_ANY,
   11841 					    MIIF_DOPAUSE);
   11842 				sc->phy.no_errprint = false;
   11843 
    11844 				/* Restore the previous SFP cage power state */
   11845 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11846 			}
   11847 		}
   11848 	} else
   11849 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11850 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11851 
   11852 	/*
    11853 	 * If the MAC is a PCH2 or newer variant and no MII PHY was detected,
    11854 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   11855 	 */
   11856 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   11857 		(sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   11858 		|| (sc->sc_type == WM_T_PCH_TGP))
   11859 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11860 		wm_set_mdio_slow_mode_hv(sc);
   11861 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11862 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11863 	}
   11864 
   11865 	/*
   11866 	 * (For ICH8 variants)
   11867 	 * If PHY detection failed, use BM's r/w function and retry.
   11868 	 */
   11869 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11870 		/* if failed, retry with *_bm_* */
   11871 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11872 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11873 		    sc->sc_phytype);
   11874 		sc->sc_phytype = WMPHY_BM;
   11875 		mii->mii_readreg = wm_gmii_bm_readreg;
   11876 		mii->mii_writereg = wm_gmii_bm_writereg;
   11877 
   11878 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11879 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11880 	}
   11881 
   11882 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11883 		/* No PHY was found */
   11884 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11885 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11886 		sc->sc_phytype = WMPHY_NONE;
   11887 	} else {
   11888 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11889 
   11890 		/*
   11891 		 * PHY found! Check PHY type again by the second call of
   11892 		 * wm_gmii_setup_phytype.
   11893 		 */
   11894 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11895 		    child->mii_mpd_model);
   11896 
   11897 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11898 	}
   11899 }
   11900 
   11901 /*
   11902  * wm_gmii_mediachange:	[ifmedia interface function]
   11903  *
   11904  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11905  */
   11906 static int
   11907 wm_gmii_mediachange(struct ifnet *ifp)
   11908 {
   11909 	struct wm_softc *sc = ifp->if_softc;
   11910 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11911 	uint32_t reg;
   11912 	int rc;
   11913 
   11914 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11915 		device_xname(sc->sc_dev), __func__));
   11916 
   11917 	KASSERT(mutex_owned(sc->sc_core_lock));
   11918 
   11919 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11920 		return 0;
   11921 
   11922 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11923 	if ((sc->sc_type == WM_T_82580)
   11924 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11925 	    || (sc->sc_type == WM_T_I211)) {
   11926 		reg = CSR_READ(sc, WMREG_PHPM);
   11927 		reg &= ~PHPM_GO_LINK_D;
   11928 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11929 	}
   11930 
   11931 	/* Disable D0 LPLU. */
   11932 	wm_lplu_d0_disable(sc);
   11933 
   11934 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11935 	sc->sc_ctrl |= CTRL_SLU;
   11936 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11937 	    || (sc->sc_type > WM_T_82543)) {
   11938 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11939 	} else {
   11940 		sc->sc_ctrl &= ~CTRL_ASDE;
   11941 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11942 		if (ife->ifm_media & IFM_FDX)
   11943 			sc->sc_ctrl |= CTRL_FD;
   11944 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11945 		case IFM_10_T:
   11946 			sc->sc_ctrl |= CTRL_SPEED_10;
   11947 			break;
   11948 		case IFM_100_TX:
   11949 			sc->sc_ctrl |= CTRL_SPEED_100;
   11950 			break;
   11951 		case IFM_1000_T:
   11952 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11953 			break;
   11954 		case IFM_NONE:
   11955 			/* There is no specific setting for IFM_NONE */
   11956 			break;
   11957 		default:
   11958 			panic("wm_gmii_mediachange: bad media 0x%x",
   11959 			    ife->ifm_media);
   11960 		}
   11961 	}
   11962 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11963 	CSR_WRITE_FLUSH(sc);
   11964 
   11965 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11966 		wm_serdes_mediachange(ifp);
   11967 
   11968 	if (sc->sc_type <= WM_T_82543)
   11969 		wm_gmii_reset(sc);
   11970 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11971 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11972 		/* Allow time for the SFP cage to power up the PHY */
   11973 		delay(300 * 1000);
   11974 		wm_gmii_reset(sc);
   11975 	}
   11976 
   11977 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11978 		return 0;
   11979 	return rc;
   11980 }
   11981 
   11982 /*
   11983  * wm_gmii_mediastatus:	[ifmedia interface function]
   11984  *
   11985  *	Get the current interface media status on a 1000BASE-T device.
   11986  */
   11987 static void
   11988 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11989 {
   11990 	struct wm_softc *sc = ifp->if_softc;
   11991 	struct ethercom *ec = &sc->sc_ethercom;
   11992 	struct mii_data *mii;
   11993 	bool dopoll = true;
   11994 
   11995 	/*
   11996 	 * In normal drivers, ether_mediastatus() is called here.
    11997 	 * To avoid calling mii_pollstat(), ether_mediastatus() is open-coded.
   11998 	 */
   11999 	KASSERT(mutex_owned(sc->sc_core_lock));
   12000 	KASSERT(ec->ec_mii != NULL);
   12001 	KASSERT(mii_locked(ec->ec_mii));
   12002 
   12003 	mii = ec->ec_mii;
   12004 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   12005 		struct timeval now;
   12006 
   12007 		getmicrotime(&now);
   12008 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   12009 			dopoll = false;
   12010 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   12011 			/* Simplify by checking tv_sec only. It's enough. */
   12012 
   12013 			sc->sc_linkup_delay_time.tv_sec = 0;
   12014 			sc->sc_linkup_delay_time.tv_usec = 0;
   12015 		}
   12016 	}
   12017 
   12018 	/*
   12019 	 * Don't call mii_pollstat() while doing workaround.
   12020 	 * See also wm_linkintr_gmii() and wm_tick().
   12021 	 */
   12022 	if (dopoll)
   12023 		mii_pollstat(mii);
   12024 	ifmr->ifm_active = mii->mii_media_active;
   12025 	ifmr->ifm_status = mii->mii_media_status;
   12026 
   12027 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12028 	    | sc->sc_flowflags;
   12029 }
   12030 
   12031 #define	MDI_IO		CTRL_SWDPIN(2)
   12032 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   12033 #define	MDI_CLK		CTRL_SWDPIN(3)
   12034 
   12035 static void
   12036 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   12037 {
   12038 	uint32_t i, v;
   12039 
   12040 	v = CSR_READ(sc, WMREG_CTRL);
   12041 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   12042 	v |= MDI_DIR | CTRL_SWDPIO(3);
   12043 
   12044 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   12045 		if (data & i)
   12046 			v |= MDI_IO;
   12047 		else
   12048 			v &= ~MDI_IO;
   12049 		CSR_WRITE(sc, WMREG_CTRL, v);
   12050 		CSR_WRITE_FLUSH(sc);
   12051 		delay(10);
   12052 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12053 		CSR_WRITE_FLUSH(sc);
   12054 		delay(10);
   12055 		CSR_WRITE(sc, WMREG_CTRL, v);
   12056 		CSR_WRITE_FLUSH(sc);
   12057 		delay(10);
   12058 	}
   12059 }
   12060 
   12061 static uint16_t
   12062 wm_i82543_mii_recvbits(struct wm_softc *sc)
   12063 {
   12064 	uint32_t v, i;
   12065 	uint16_t data = 0;
   12066 
   12067 	v = CSR_READ(sc, WMREG_CTRL);
   12068 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   12069 	v |= CTRL_SWDPIO(3);
   12070 
   12071 	CSR_WRITE(sc, WMREG_CTRL, v);
   12072 	CSR_WRITE_FLUSH(sc);
   12073 	delay(10);
   12074 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12075 	CSR_WRITE_FLUSH(sc);
   12076 	delay(10);
   12077 	CSR_WRITE(sc, WMREG_CTRL, v);
   12078 	CSR_WRITE_FLUSH(sc);
   12079 	delay(10);
   12080 
   12081 	for (i = 0; i < 16; i++) {
   12082 		data <<= 1;
   12083 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12084 		CSR_WRITE_FLUSH(sc);
   12085 		delay(10);
   12086 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   12087 			data |= 1;
   12088 		CSR_WRITE(sc, WMREG_CTRL, v);
   12089 		CSR_WRITE_FLUSH(sc);
   12090 		delay(10);
   12091 	}
   12092 
   12093 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12094 	CSR_WRITE_FLUSH(sc);
   12095 	delay(10);
   12096 	CSR_WRITE(sc, WMREG_CTRL, v);
   12097 	CSR_WRITE_FLUSH(sc);
   12098 	delay(10);
   12099 
   12100 	return data;
   12101 }
   12102 
   12103 #undef MDI_IO
   12104 #undef MDI_DIR
   12105 #undef MDI_CLK
   12106 
   12107 /*
   12108  * wm_gmii_i82543_readreg:	[mii interface function]
   12109  *
   12110  *	Read a PHY register on the GMII (i82543 version).
   12111  */
   12112 static int
   12113 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12114 {
   12115 	struct wm_softc *sc = device_private(dev);
   12116 
   12117 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12118 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   12119 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   12120 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   12121 
   12122 	DPRINTF(sc, WM_DEBUG_GMII,
   12123 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   12124 		device_xname(dev), phy, reg, *val));
   12125 
   12126 	return 0;
   12127 }
   12128 
   12129 /*
   12130  * wm_gmii_i82543_writereg:	[mii interface function]
   12131  *
   12132  *	Write a PHY register on the GMII (i82543 version).
   12133  */
   12134 static int
   12135 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   12136 {
   12137 	struct wm_softc *sc = device_private(dev);
   12138 
   12139 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12140 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   12141 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   12142 	    (MII_COMMAND_START << 30), 32);
   12143 
   12144 	return 0;
   12145 }
   12146 
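
          /*
           * The bit-banged transactions above follow the IEEE 802.3
           * clause 22 MDIO frame format (a summary; field widths in bits):
           *
           *	read:  PRE(32x1) ST(01) OP(10) PHYAD(5) REGAD(5) TA(Z0) DATA(16)
           *	write: PRE(32x1) ST(01) OP(01) PHYAD(5) REGAD(5) TA(10) DATA(16)
           *
           * For a read, only the 14 bits from ST through REGAD are driven
           * by the host; the turnaround and data bits are then clocked in
           * by wm_i82543_mii_recvbits().
           */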
   12147 /*
   12148  * wm_gmii_mdic_readreg:	[mii interface function]
   12149  *
   12150  *	Read a PHY register on the GMII.
   12151  */
   12152 static int
   12153 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12154 {
   12155 	struct wm_softc *sc = device_private(dev);
   12156 	uint32_t mdic = 0;
   12157 	int i;
   12158 
   12159 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12160 	    && (reg > MII_ADDRMASK)) {
   12161 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12162 		    __func__, sc->sc_phytype, reg);
   12163 		reg &= MII_ADDRMASK;
   12164 	}
   12165 
   12166 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   12167 	    MDIC_REGADD(reg));
   12168 
   12169 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12170 		delay(50);
   12171 		mdic = CSR_READ(sc, WMREG_MDIC);
   12172 		if (mdic & MDIC_READY)
   12173 			break;
   12174 	}
   12175 
   12176 	if ((mdic & MDIC_READY) == 0) {
   12177 		DPRINTF(sc, WM_DEBUG_GMII,
   12178 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   12179 			device_xname(dev), phy, reg));
   12180 		return ETIMEDOUT;
   12181 	} else if (mdic & MDIC_E) {
   12182 		/* This is normal if no PHY is present. */
   12183 		DPRINTF(sc, WM_DEBUG_GMII,
   12184 		    ("%s: MDIC read error: phy %d reg %d\n",
   12185 			device_xname(sc->sc_dev), phy, reg));
   12186 		return -1;
   12187 	} else
   12188 		*val = MDIC_DATA(mdic);
   12189 
   12190 	/*
   12191 	 * Allow some time after each MDIC transaction to avoid
   12192 	 * reading duplicate data in the next MDIC transaction.
   12193 	 */
   12194 	if (sc->sc_type == WM_T_PCH2)
   12195 		delay(100);
   12196 
   12197 	return 0;
   12198 }
   12199 
   12200 /*
   12201  * wm_gmii_mdic_writereg:	[mii interface function]
   12202  *
   12203  *	Write a PHY register on the GMII.
   12204  */
   12205 static int
   12206 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12207 {
   12208 	struct wm_softc *sc = device_private(dev);
   12209 	uint32_t mdic = 0;
   12210 	int i;
   12211 
   12212 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12213 	    && (reg > MII_ADDRMASK)) {
   12214 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12215 		    __func__, sc->sc_phytype, reg);
   12216 		reg &= MII_ADDRMASK;
   12217 	}
   12218 
   12219 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12220 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12221 
   12222 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12223 		delay(50);
   12224 		mdic = CSR_READ(sc, WMREG_MDIC);
   12225 		if (mdic & MDIC_READY)
   12226 			break;
   12227 	}
   12228 
   12229 	if ((mdic & MDIC_READY) == 0) {
   12230 		DPRINTF(sc, WM_DEBUG_GMII,
   12231 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12232 			device_xname(dev), phy, reg));
   12233 		return ETIMEDOUT;
   12234 	} else if (mdic & MDIC_E) {
   12235 		DPRINTF(sc, WM_DEBUG_GMII,
   12236 		    ("%s: MDIC write error: phy %d reg %d\n",
   12237 			device_xname(dev), phy, reg));
   12238 		return -1;
   12239 	}
   12240 
   12241 	/*
   12242 	 * Allow some time after each MDIC transaction to avoid
   12243 	 * reading duplicate data in the next MDIC transaction.
   12244 	 */
   12245 	if (sc->sc_type == WM_T_PCH2)
   12246 		delay(100);
   12247 
   12248 	return 0;
   12249 }
   12250 
   12251 /*
   12252  * wm_gmii_i82544_readreg:	[mii interface function]
   12253  *
   12254  *	Read a PHY register on the GMII.
   12255  */
   12256 static int
   12257 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12258 {
   12259 	struct wm_softc *sc = device_private(dev);
   12260 	int rv;
   12261 
   12262 	rv = sc->phy.acquire(sc);
   12263 	if (rv != 0) {
   12264 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12265 		return rv;
   12266 	}
   12267 
   12268 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12269 
   12270 	sc->phy.release(sc);
   12271 
   12272 	return rv;
   12273 }
   12274 
   12275 static int
   12276 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12277 {
   12278 	struct wm_softc *sc = device_private(dev);
   12279 	int rv;
   12280 
   12281 	switch (sc->sc_phytype) {
   12282 	case WMPHY_IGP:
   12283 	case WMPHY_IGP_2:
   12284 	case WMPHY_IGP_3:
   12285 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12286 			rv = wm_gmii_mdic_writereg(dev, phy,
   12287 			    IGPHY_PAGE_SELECT, reg);
   12288 			if (rv != 0)
   12289 				return rv;
   12290 		}
   12291 		break;
   12292 	default:
   12293 #ifdef WM_DEBUG
   12294 		if ((reg >> MII_ADDRBITS) != 0)
   12295 			device_printf(dev,
   12296 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12297 			    __func__, sc->sc_phytype, reg);
   12298 #endif
   12299 		break;
   12300 	}
   12301 
   12302 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12303 }
   12304 
   12305 /*
   12306  * wm_gmii_i82544_writereg:	[mii interface function]
   12307  *
   12308  *	Write a PHY register on the GMII.
   12309  */
   12310 static int
   12311 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12312 {
   12313 	struct wm_softc *sc = device_private(dev);
   12314 	int rv;
   12315 
   12316 	rv = sc->phy.acquire(sc);
   12317 	if (rv != 0) {
   12318 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12319 		return rv;
   12320 	}
   12321 
   12322 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12323 	sc->phy.release(sc);
   12324 
   12325 	return rv;
   12326 }
   12327 
   12328 static int
   12329 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12330 {
   12331 	struct wm_softc *sc = device_private(dev);
   12332 	int rv;
   12333 
   12334 	switch (sc->sc_phytype) {
   12335 	case WMPHY_IGP:
   12336 	case WMPHY_IGP_2:
   12337 	case WMPHY_IGP_3:
   12338 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12339 			rv = wm_gmii_mdic_writereg(dev, phy,
   12340 			    IGPHY_PAGE_SELECT, reg);
   12341 			if (rv != 0)
   12342 				return rv;
   12343 		}
   12344 		break;
   12345 	default:
   12346 #ifdef WM_DEBUG
   12347 		if ((reg >> MII_ADDRBITS) != 0)
   12348 			device_printf(dev,
    12349 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12350 			    __func__, sc->sc_phytype, reg);
   12351 #endif
   12352 		break;
   12353 	}
   12354 
   12355 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12356 }
   12357 
   12358 /*
   12359  * wm_gmii_i80003_readreg:	[mii interface function]
   12360  *
   12361  *	Read a PHY register on the kumeran
    12362  *	Read a PHY register on the Kumeran interface.
   12363  * resource ...
   12364  */
   12365 static int
   12366 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12367 {
   12368 	struct wm_softc *sc = device_private(dev);
   12369 	int page_select;
   12370 	uint16_t temp, temp2;
   12371 	int rv;
   12372 
    12373 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   12374 		return -1;
   12375 
   12376 	rv = sc->phy.acquire(sc);
   12377 	if (rv != 0) {
   12378 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12379 		return rv;
   12380 	}
   12381 
   12382 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12383 		page_select = GG82563_PHY_PAGE_SELECT;
   12384 	else {
   12385 		/*
   12386 		 * Use Alternative Page Select register to access registers
   12387 		 * 30 and 31.
   12388 		 */
   12389 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12390 	}
   12391 	temp = reg >> GG82563_PAGE_SHIFT;
   12392 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12393 		goto out;
   12394 
   12395 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12396 		/*
    12397 		 * Wait an additional 200us to work around a bug with the
    12398 		 * ready bit in the MDIC register.
   12399 		 */
   12400 		delay(200);
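          		/* Read the page select register back to verify the write. */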
   12401 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12402 		if ((rv != 0) || (temp2 != temp)) {
   12403 			device_printf(dev, "%s failed\n", __func__);
   12404 			rv = -1;
   12405 			goto out;
   12406 		}
   12407 		delay(200);
   12408 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12409 		delay(200);
   12410 	} else
   12411 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12412 
   12413 out:
   12414 	sc->phy.release(sc);
   12415 	return rv;
   12416 }
   12417 
   12418 /*
   12419  * wm_gmii_i80003_writereg:	[mii interface function]
   12420  *
   12421  *	Write a PHY register on the kumeran.
   12422  * This could be handled by the PHY layer if we didn't have to lock the
   12423  * resource ...
   12424  */
   12425 static int
   12426 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12427 {
   12428 	struct wm_softc *sc = device_private(dev);
   12429 	int page_select, rv;
   12430 	uint16_t temp, temp2;
   12431 
   12432 	if (phy != 1) /* Only one PHY on kumeran bus */
   12433 		return -1;
   12434 
   12435 	rv = sc->phy.acquire(sc);
   12436 	if (rv != 0) {
   12437 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12438 		return rv;
   12439 	}
   12440 
   12441 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12442 		page_select = GG82563_PHY_PAGE_SELECT;
   12443 	else {
   12444 		/*
   12445 		 * Use Alternative Page Select register to access registers
   12446 		 * 30 and 31.
   12447 		 */
   12448 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12449 	}
   12450 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12451 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12452 		goto out;
   12453 
   12454 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12455 		/*
    12456 		 * Wait an extra 200us to work around a bug in the ready
    12457 		 * bit of the MDIC register.
   12458 		 */
   12459 		delay(200);
   12460 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12461 		if ((rv != 0) || (temp2 != temp)) {
   12462 			device_printf(dev, "%s failed\n", __func__);
   12463 			rv = -1;
   12464 			goto out;
   12465 		}
   12466 		delay(200);
   12467 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12468 		delay(200);
   12469 	} else
   12470 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12471 
   12472 out:
   12473 	sc->phy.release(sc);
   12474 	return rv;
   12475 }
   12476 
   12477 /*
   12478  * wm_gmii_bm_readreg:	[mii interface function]
   12479  *
    12480  *	Read a PHY register on the BM PHY.
   12481  * This could be handled by the PHY layer if we didn't have to lock the
   12482  * resource ...
   12483  */
   12484 static int
   12485 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12486 {
   12487 	struct wm_softc *sc = device_private(dev);
   12488 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12489 	int rv;
   12490 
   12491 	rv = sc->phy.acquire(sc);
   12492 	if (rv != 0) {
   12493 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12494 		return rv;
   12495 	}
   12496 
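          	/*
          	 * On chips other than the 82574 and 82583, registers on
          	 * pages >= 768, and registers 25 and 31, are only reachable
          	 * at PHY address 1.
          	 */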
   12497 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12498 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12499 		    || (reg == 31)) ? 1 : phy;
   12500 	/* Page 800 works differently than the rest so it has its own func */
   12501 	if (page == BM_WUC_PAGE) {
   12502 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12503 		goto release;
   12504 	}
   12505 
   12506 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12507 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12508 		    && (sc->sc_type != WM_T_82583))
   12509 			rv = wm_gmii_mdic_writereg(dev, phy,
   12510 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12511 		else
   12512 			rv = wm_gmii_mdic_writereg(dev, phy,
   12513 			    BME1000_PHY_PAGE_SELECT, page);
   12514 		if (rv != 0)
   12515 			goto release;
   12516 	}
   12517 
   12518 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12519 
   12520 release:
   12521 	sc->phy.release(sc);
   12522 	return rv;
   12523 }
   12524 
   12525 /*
   12526  * wm_gmii_bm_writereg:	[mii interface function]
   12527  *
    12528  *	Write a PHY register on the BM PHY.
   12529  * This could be handled by the PHY layer if we didn't have to lock the
   12530  * resource ...
   12531  */
   12532 static int
   12533 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12534 {
   12535 	struct wm_softc *sc = device_private(dev);
   12536 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12537 	int rv;
   12538 
   12539 	rv = sc->phy.acquire(sc);
   12540 	if (rv != 0) {
   12541 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12542 		return rv;
   12543 	}
   12544 
   12545 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12546 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12547 		    || (reg == 31)) ? 1 : phy;
   12548 	/* Page 800 works differently than the rest so it has its own func */
   12549 	if (page == BM_WUC_PAGE) {
   12550 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12551 		goto release;
   12552 	}
   12553 
   12554 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12555 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12556 		    && (sc->sc_type != WM_T_82583))
   12557 			rv = wm_gmii_mdic_writereg(dev, phy,
   12558 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12559 		else
   12560 			rv = wm_gmii_mdic_writereg(dev, phy,
   12561 			    BME1000_PHY_PAGE_SELECT, page);
   12562 		if (rv != 0)
   12563 			goto release;
   12564 	}
   12565 
   12566 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12567 
   12568 release:
   12569 	sc->phy.release(sc);
   12570 	return rv;
   12571 }
   12572 
   12573 /*
   12574  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12575  *  @dev: pointer to the HW structure
   12576  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   12577  *
   12578  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   12579  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12580  */
   12581 static int
   12582 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12583 {
   12584 #ifdef WM_DEBUG
   12585 	struct wm_softc *sc = device_private(dev);
   12586 #endif
   12587 	uint16_t temp;
   12588 	int rv;
   12589 
   12590 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12591 		device_xname(dev), __func__));
   12592 
   12593 	if (!phy_regp)
   12594 		return -1;
   12595 
   12596 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12597 
   12598 	/* Select Port Control Registers page */
   12599 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12600 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12601 	if (rv != 0)
   12602 		return rv;
   12603 
   12604 	/* Read WUCE and save it */
   12605 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12606 	if (rv != 0)
   12607 		return rv;
   12608 
   12609 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12610 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12611 	 */
   12612 	temp = *phy_regp;
   12613 	temp |= BM_WUC_ENABLE_BIT;
   12614 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12615 
   12616 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12617 		return rv;
   12618 
   12619 	/* Select Host Wakeup Registers page - caller now able to write
   12620 	 * registers on the Wakeup registers page
   12621 	 */
   12622 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12623 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12624 }
   12625 
   12626 /*
   12627  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12628  *  @dev: pointer to the HW structure
   12629  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12630  *
   12631  *  Restore BM_WUC_ENABLE_REG to its original value.
   12632  *
   12633  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12634  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12635  *  caller.
   12636  */
   12637 static int
   12638 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12639 {
   12640 #ifdef WM_DEBUG
   12641 	struct wm_softc *sc = device_private(dev);
   12642 #endif
   12643 
   12644 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12645 		device_xname(dev), __func__));
   12646 
   12647 	if (!phy_regp)
   12648 		return -1;
   12649 
   12650 	/* Select Port Control Registers page */
   12651 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12652 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12653 
   12654 	/* Restore 769.17 to its original value */
   12655 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12656 
   12657 	return 0;
   12658 }
   12659 
   12660 /*
   12661  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12662  *  @dev: pointer to the HW structure
   12663  *  @offset: register offset to be read or written
   12664  *  @val: pointer to the data to read or write
   12665  *  @rd: determines if operation is read or write
   12666  *  @page_set: BM_WUC_PAGE already set and access enabled
   12667  *
   12668  *  Read the PHY register at offset and store the retrieved information in
   12669  *  data, or write data to PHY register at offset.  Note the procedure to
   12670  *  access the PHY wakeup registers is different than reading the other PHY
   12671  *  registers. It works as such:
   12672  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   12673  *  2) Set page to 800 for host (801 if we were manageability)
   12674  *  3) Write the address using the address opcode (0x11)
   12675  *  4) Read or write the data using the data opcode (0x12)
   12676  *  5) Restore 769.17.2 to its original value
   12677  *
   12678  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12679  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12680  *
   12681  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12682  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12683  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12684  */
   12685 static int
    12686 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12687     bool page_set)
   12688 {
   12689 	struct wm_softc *sc = device_private(dev);
   12690 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12691 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12692 	uint16_t wuce;
   12693 	int rv = 0;
   12694 
   12695 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12696 		device_xname(dev), __func__));
   12697 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12698 	if ((sc->sc_type == WM_T_PCH)
   12699 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12700 		device_printf(dev,
   12701 		    "Attempting to access page %d while gig enabled.\n", page);
   12702 	}
   12703 
   12704 	if (!page_set) {
   12705 		/* Enable access to PHY wakeup registers */
   12706 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12707 		if (rv != 0) {
   12708 			device_printf(dev,
   12709 			    "%s: Could not enable PHY wakeup reg access\n",
   12710 			    __func__);
   12711 			return rv;
   12712 		}
   12713 	}
   12714 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12715 		device_xname(sc->sc_dev), __func__, page, regnum));
   12716 
   12717 	/*
    12718 	 * 3) and 4) Access the PHY wakeup register: write the register
    12719 	 * number, then read or write the data (see the steps above).
   12720 	 */
   12721 
   12722 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12723 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12724 	if (rv != 0)
   12725 		return rv;
   12726 
   12727 	if (rd) {
   12728 		/* Read the Wakeup register page value using opcode 0x12 */
   12729 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12730 	} else {
   12731 		/* Write the Wakeup register page value using opcode 0x12 */
   12732 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12733 	}
   12734 	if (rv != 0)
   12735 		return rv;
   12736 
   12737 	if (!page_set)
   12738 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12739 
   12740 	return rv;
   12741 }
   12742 
   12743 /*
   12744  * wm_gmii_hv_readreg:	[mii interface function]
   12745  *
    12746  *	Read a PHY register on the HV (PCH and newer) PHY.
   12747  * This could be handled by the PHY layer if we didn't have to lock the
   12748  * resource ...
   12749  */
   12750 static int
   12751 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12752 {
   12753 	struct wm_softc *sc = device_private(dev);
   12754 	int rv;
   12755 
   12756 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12757 		device_xname(dev), __func__));
   12758 
   12759 	rv = sc->phy.acquire(sc);
   12760 	if (rv != 0) {
   12761 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12762 		return rv;
   12763 	}
   12764 
   12765 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12766 	sc->phy.release(sc);
   12767 	return rv;
   12768 }
   12769 
   12770 static int
   12771 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12772 {
   12773 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12774 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12775 	int rv;
   12776 
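          	/* Registers on pages >= HV_INTC_FC_PAGE_START sit at PHY address 1. */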
   12777 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12778 
   12779 	/* Page 800 works differently than the rest so it has its own func */
   12780 	if (page == BM_WUC_PAGE)
   12781 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12782 
   12783 	/*
    12784 	 * Pages lower than 768 work differently than the rest and are
    12785 	 * not handled here.
   12786 	 */
   12787 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12788 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12789 		return -1;
   12790 	}
   12791 
   12792 	/*
   12793 	 * XXX I21[789] documents say that the SMBus Address register is at
   12794 	 * PHY address 01, Page 0 (not 768), Register 26.
   12795 	 */
   12796 	if (page == HV_INTC_FC_PAGE_START)
   12797 		page = 0;
   12798 
   12799 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12800 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12801 		    page << BME1000_PAGE_SHIFT);
   12802 		if (rv != 0)
   12803 			return rv;
   12804 	}
   12805 
   12806 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12807 }
   12808 
   12809 /*
   12810  * wm_gmii_hv_writereg:	[mii interface function]
   12811  *
    12812  *	Write a PHY register on the HV (PCH and newer) PHY.
   12813  * This could be handled by the PHY layer if we didn't have to lock the
   12814  * resource ...
   12815  */
   12816 static int
   12817 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12818 {
   12819 	struct wm_softc *sc = device_private(dev);
   12820 	int rv;
   12821 
   12822 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12823 		device_xname(dev), __func__));
   12824 
   12825 	rv = sc->phy.acquire(sc);
   12826 	if (rv != 0) {
   12827 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12828 		return rv;
   12829 	}
   12830 
   12831 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12832 	sc->phy.release(sc);
   12833 
   12834 	return rv;
   12835 }
   12836 
   12837 static int
   12838 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12839 {
   12840 	struct wm_softc *sc = device_private(dev);
   12841 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12842 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12843 	int rv;
   12844 
   12845 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12846 
   12847 	/* Page 800 works differently than the rest so it has its own func */
   12848 	if (page == BM_WUC_PAGE)
   12849 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12850 		    false);
   12851 
   12852 	/*
    12853 	 * Pages lower than 768 work differently than the rest and are
    12854 	 * not handled here.
   12855 	 */
   12856 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12857 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12858 		return -1;
   12859 	}
   12860 
   12861 	{
   12862 		/*
   12863 		 * XXX I21[789] documents say that the SMBus Address register
   12864 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12865 		 */
   12866 		if (page == HV_INTC_FC_PAGE_START)
   12867 			page = 0;
   12868 
   12869 		/*
   12870 		 * XXX Workaround MDIO accesses being disabled after entering
   12871 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12872 		 * register is set)
   12873 		 */
   12874 		if (sc->sc_phytype == WMPHY_82578) {
   12875 			struct mii_softc *child;
   12876 
   12877 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12878 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12879 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12880 			    && ((val & (1 << 11)) != 0)) {
   12881 				device_printf(dev, "XXX need workaround\n");
   12882 			}
   12883 		}
   12884 
   12885 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12886 			rv = wm_gmii_mdic_writereg(dev, 1,
   12887 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12888 			if (rv != 0)
   12889 				return rv;
   12890 		}
   12891 	}
   12892 
   12893 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12894 }
   12895 
   12896 /*
   12897  * wm_gmii_82580_readreg:	[mii interface function]
   12898  *
   12899  *	Read a PHY register on the 82580 and I350.
   12900  * This could be handled by the PHY layer if we didn't have to lock the
   12901  * resource ...
   12902  */
   12903 static int
   12904 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12905 {
   12906 	struct wm_softc *sc = device_private(dev);
   12907 	int rv;
   12908 
   12909 	rv = sc->phy.acquire(sc);
   12910 	if (rv != 0) {
   12911 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12912 		return rv;
   12913 	}
   12914 
   12915 #ifdef DIAGNOSTIC
   12916 	if (reg > MII_ADDRMASK) {
   12917 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12918 		    __func__, sc->sc_phytype, reg);
   12919 		reg &= MII_ADDRMASK;
   12920 	}
   12921 #endif
   12922 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12923 
   12924 	sc->phy.release(sc);
   12925 	return rv;
   12926 }
   12927 
   12928 /*
   12929  * wm_gmii_82580_writereg:	[mii interface function]
   12930  *
   12931  *	Write a PHY register on the 82580 and I350.
   12932  * This could be handled by the PHY layer if we didn't have to lock the
   12933  * resource ...
   12934  */
   12935 static int
   12936 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12937 {
   12938 	struct wm_softc *sc = device_private(dev);
   12939 	int rv;
   12940 
   12941 	rv = sc->phy.acquire(sc);
   12942 	if (rv != 0) {
   12943 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12944 		return rv;
   12945 	}
   12946 
   12947 #ifdef DIAGNOSTIC
   12948 	if (reg > MII_ADDRMASK) {
   12949 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12950 		    __func__, sc->sc_phytype, reg);
   12951 		reg &= MII_ADDRMASK;
   12952 	}
   12953 #endif
   12954 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12955 
   12956 	sc->phy.release(sc);
   12957 	return rv;
   12958 }
   12959 
   12960 /*
   12961  * wm_gmii_gs40g_readreg:	[mii interface function]
   12962  *
    12963  *	Read a PHY register on the I210 and I211.
   12964  * This could be handled by the PHY layer if we didn't have to lock the
   12965  * resource ...
   12966  */
   12967 static int
   12968 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12969 {
   12970 	struct wm_softc *sc = device_private(dev);
   12971 	int page, offset;
   12972 	int rv;
   12973 
   12974 	/* Acquire semaphore */
   12975 	rv = sc->phy.acquire(sc);
   12976 	if (rv != 0) {
   12977 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12978 		return rv;
   12979 	}
   12980 
   12981 	/* Page select */
   12982 	page = reg >> GS40G_PAGE_SHIFT;
   12983 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12984 	if (rv != 0)
   12985 		goto release;
   12986 
   12987 	/* Read reg */
   12988 	offset = reg & GS40G_OFFSET_MASK;
   12989 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12990 
   12991 release:
   12992 	sc->phy.release(sc);
   12993 	return rv;
   12994 }
   12995 
   12996 /*
   12997  * wm_gmii_gs40g_writereg:	[mii interface function]
   12998  *
   12999  *	Write a PHY register on the I210 and I211.
   13000  * This could be handled by the PHY layer if we didn't have to lock the
   13001  * resource ...
   13002  */
   13003 static int
   13004 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   13005 {
   13006 	struct wm_softc *sc = device_private(dev);
   13007 	uint16_t page;
   13008 	int offset, rv;
   13009 
   13010 	/* Acquire semaphore */
   13011 	rv = sc->phy.acquire(sc);
   13012 	if (rv != 0) {
   13013 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13014 		return rv;
   13015 	}
   13016 
   13017 	/* Page select */
   13018 	page = reg >> GS40G_PAGE_SHIFT;
   13019 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   13020 	if (rv != 0)
   13021 		goto release;
   13022 
   13023 	/* Write reg */
   13024 	offset = reg & GS40G_OFFSET_MASK;
   13025 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   13026 
   13027 release:
   13028 	/* Release semaphore */
   13029 	sc->phy.release(sc);
   13030 	return rv;
   13031 }
   13032 
   13033 /*
   13034  * wm_gmii_statchg:	[mii interface function]
   13035  *
   13036  *	Callback from MII layer when media changes.
   13037  */
   13038 static void
   13039 wm_gmii_statchg(struct ifnet *ifp)
   13040 {
   13041 	struct wm_softc *sc = ifp->if_softc;
   13042 	struct mii_data *mii = &sc->sc_mii;
   13043 
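          	/*
          	 * Clear the flow control and collision distance settings
          	 * before re-applying the negotiated values.
          	 */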
   13044 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   13045 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13046 	sc->sc_fcrtl &= ~FCRTL_XONE;
   13047 
   13048 	/* Get flow control negotiation result. */
   13049 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   13050 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   13051 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   13052 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   13053 	}
   13054 
   13055 	if (sc->sc_flowflags & IFM_FLOW) {
   13056 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   13057 			sc->sc_ctrl |= CTRL_TFCE;
   13058 			sc->sc_fcrtl |= FCRTL_XONE;
   13059 		}
   13060 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   13061 			sc->sc_ctrl |= CTRL_RFCE;
   13062 	}
   13063 
   13064 	if (mii->mii_media_active & IFM_FDX) {
   13065 		DPRINTF(sc, WM_DEBUG_LINK,
   13066 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   13067 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13068 	} else {
   13069 		DPRINTF(sc, WM_DEBUG_LINK,
   13070 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   13071 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13072 	}
   13073 
   13074 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13075 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13076 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13077 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13078 	if (sc->sc_type == WM_T_80003) {
   13079 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   13080 		case IFM_1000_T:
   13081 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   13082 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    13083 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   13084 			break;
   13085 		default:
   13086 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   13087 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    13088 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   13089 			break;
   13090 		}
   13091 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   13092 	}
   13093 }
   13094 
   13095 /* kumeran related (80003, ICH* and PCH*) */
   13096 
   13097 /*
   13098  * wm_kmrn_readreg:
   13099  *
   13100  *	Read a kumeran register
   13101  */
   13102 static int
   13103 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   13104 {
   13105 	int rv;
   13106 
   13107 	if (sc->sc_type == WM_T_80003)
   13108 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13109 	else
   13110 		rv = sc->phy.acquire(sc);
   13111 	if (rv != 0) {
   13112 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13113 		    __func__);
   13114 		return rv;
   13115 	}
   13116 
   13117 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   13118 
   13119 	if (sc->sc_type == WM_T_80003)
   13120 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13121 	else
   13122 		sc->phy.release(sc);
   13123 
   13124 	return rv;
   13125 }
   13126 
   13127 static int
   13128 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   13129 {
   13130 
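          	/*
          	 * A Kumeran read: post the register offset with the read
          	 * enable (REN) bit set, wait 2us, then mask the returned
          	 * data with KUMCTRLSTA_MASK.
          	 */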
   13131 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13132 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   13133 	    KUMCTRLSTA_REN);
   13134 	CSR_WRITE_FLUSH(sc);
   13135 	delay(2);
   13136 
   13137 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   13138 
   13139 	return 0;
   13140 }
   13141 
   13142 /*
   13143  * wm_kmrn_writereg:
   13144  *
   13145  *	Write a kumeran register
   13146  */
   13147 static int
   13148 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   13149 {
   13150 	int rv;
   13151 
   13152 	if (sc->sc_type == WM_T_80003)
   13153 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13154 	else
   13155 		rv = sc->phy.acquire(sc);
   13156 	if (rv != 0) {
   13157 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13158 		    __func__);
   13159 		return rv;
   13160 	}
   13161 
   13162 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   13163 
   13164 	if (sc->sc_type == WM_T_80003)
   13165 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13166 	else
   13167 		sc->phy.release(sc);
   13168 
   13169 	return rv;
   13170 }
   13171 
   13172 static int
   13173 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   13174 {
   13175 
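          	/* A Kumeran write posts the offset and data in a single write. */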
   13176 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13177 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   13178 
   13179 	return 0;
   13180 }
   13181 
   13182 /*
    13183  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   13184  * This access method is different from IEEE MMD.
   13185  */
   13186 static int
   13187 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   13188 {
   13189 	struct wm_softc *sc = device_private(dev);
   13190 	int rv;
   13191 
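          	/*
          	 * EMI registers are accessed indirectly: post the register
          	 * number to I82579_EMI_ADDR, then move the data through
          	 * I82579_EMI_DATA.
          	 */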
   13192 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   13193 	if (rv != 0)
   13194 		return rv;
   13195 
   13196 	if (rd)
   13197 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   13198 	else
   13199 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   13200 	return rv;
   13201 }
   13202 
   13203 static int
   13204 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   13205 {
   13206 
   13207 	return wm_access_emi_reg_locked(dev, reg, val, true);
   13208 }
   13209 
   13210 static int
   13211 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13212 {
   13213 
   13214 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13215 }
   13216 
   13217 /* SGMII related */
   13218 
   13219 /*
   13220  * wm_sgmii_uses_mdio
   13221  *
   13222  * Check whether the transaction is to the internal PHY or the external
   13223  * MDIO interface. Return true if it's MDIO.
   13224  */
   13225 static bool
   13226 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13227 {
   13228 	uint32_t reg;
   13229 	bool ismdio = false;
   13230 
   13231 	switch (sc->sc_type) {
   13232 	case WM_T_82575:
   13233 	case WM_T_82576:
   13234 		reg = CSR_READ(sc, WMREG_MDIC);
   13235 		ismdio = ((reg & MDIC_DEST) != 0);
   13236 		break;
   13237 	case WM_T_82580:
   13238 	case WM_T_I350:
   13239 	case WM_T_I354:
   13240 	case WM_T_I210:
   13241 	case WM_T_I211:
   13242 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13243 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13244 		break;
   13245 	default:
   13246 		break;
   13247 	}
   13248 
   13249 	return ismdio;
   13250 }
   13251 
   13252 /* Setup internal SGMII PHY for SFP */
   13253 static void
   13254 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13255 {
   13256 	uint16_t id1, id2, phyreg;
   13257 	int i, rv;
   13258 
   13259 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13260 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13261 		return;
   13262 
   13263 	for (i = 0; i < MII_NPHY; i++) {
   13264 		sc->phy.no_errprint = true;
   13265 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13266 		if (rv != 0)
   13267 			continue;
   13268 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13269 		if (rv != 0)
   13270 			continue;
   13271 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13272 			continue;
   13273 		sc->phy.no_errprint = false;
   13274 
   13275 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13276 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13277 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13278 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13279 		break;
   13280 	}
   13281 
   13282 }
   13283 
   13284 /*
   13285  * wm_sgmii_readreg:	[mii interface function]
   13286  *
   13287  *	Read a PHY register on the SGMII
   13288  * This could be handled by the PHY layer if we didn't have to lock the
   13289  * resource ...
   13290  */
   13291 static int
   13292 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13293 {
   13294 	struct wm_softc *sc = device_private(dev);
   13295 	int rv;
   13296 
   13297 	rv = sc->phy.acquire(sc);
   13298 	if (rv != 0) {
   13299 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13300 		return rv;
   13301 	}
   13302 
   13303 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13304 
   13305 	sc->phy.release(sc);
   13306 	return rv;
   13307 }
   13308 
   13309 static int
   13310 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13311 {
   13312 	struct wm_softc *sc = device_private(dev);
   13313 	uint32_t i2ccmd;
   13314 	int i, rv = 0;
   13315 
   13316 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13317 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13318 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13319 
   13320 	/* Poll the ready bit */
   13321 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13322 		delay(50);
   13323 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13324 		if (i2ccmd & I2CCMD_READY)
   13325 			break;
   13326 	}
   13327 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13328 		device_printf(dev, "I2CCMD Read did not complete\n");
   13329 		rv = ETIMEDOUT;
   13330 	}
   13331 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13332 		if (!sc->phy.no_errprint)
   13333 			device_printf(dev, "I2CCMD Error bit set\n");
   13334 		rv = EIO;
   13335 	}
   13336 
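          	/* The I2C interface returns the data byte-swapped; undo that. */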
   13337 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13338 
   13339 	return rv;
   13340 }
   13341 
   13342 /*
   13343  * wm_sgmii_writereg:	[mii interface function]
   13344  *
   13345  *	Write a PHY register on the SGMII.
   13346  * This could be handled by the PHY layer if we didn't have to lock the
   13347  * resource ...
   13348  */
   13349 static int
   13350 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13351 {
   13352 	struct wm_softc *sc = device_private(dev);
   13353 	int rv;
   13354 
   13355 	rv = sc->phy.acquire(sc);
   13356 	if (rv != 0) {
   13357 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13358 		return rv;
   13359 	}
   13360 
   13361 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13362 
   13363 	sc->phy.release(sc);
   13364 
   13365 	return rv;
   13366 }
   13367 
   13368 static int
   13369 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13370 {
   13371 	struct wm_softc *sc = device_private(dev);
   13372 	uint32_t i2ccmd;
   13373 	uint16_t swapdata;
   13374 	int rv = 0;
   13375 	int i;
   13376 
   13377 	/* Swap the data bytes for the I2C interface */
   13378 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13379 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13380 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13381 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13382 
   13383 	/* Poll the ready bit */
   13384 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13385 		delay(50);
   13386 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13387 		if (i2ccmd & I2CCMD_READY)
   13388 			break;
   13389 	}
   13390 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13391 		device_printf(dev, "I2CCMD Write did not complete\n");
   13392 		rv = ETIMEDOUT;
   13393 	}
   13394 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13395 		device_printf(dev, "I2CCMD Error bit set\n");
   13396 		rv = EIO;
   13397 	}
   13398 
   13399 	return rv;
   13400 }
   13401 
   13402 /* TBI related */
   13403 
   13404 static bool
   13405 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13406 {
   13407 	bool sig;
   13408 
   13409 	sig = ctrl & CTRL_SWDPIN(1);
   13410 
   13411 	/*
   13412 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13413 	 * detect a signal, 1 if they don't.
   13414 	 */
   13415 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13416 		sig = !sig;
   13417 
   13418 	return sig;
   13419 }
   13420 
   13421 /*
   13422  * wm_tbi_mediainit:
   13423  *
   13424  *	Initialize media for use on 1000BASE-X devices.
   13425  */
   13426 static void
   13427 wm_tbi_mediainit(struct wm_softc *sc)
   13428 {
   13429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13430 	const char *sep = "";
   13431 
   13432 	if (sc->sc_type < WM_T_82543)
   13433 		sc->sc_tipg = TIPG_WM_DFLT;
   13434 	else
   13435 		sc->sc_tipg = TIPG_LG_DFLT;
   13436 
   13437 	sc->sc_tbi_serdes_anegticks = 5;
   13438 
   13439 	/* Initialize our media structures */
   13440 	sc->sc_mii.mii_ifp = ifp;
   13441 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13442 
   13443 	ifp->if_baudrate = IF_Gbps(1);
   13444 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13445 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13446 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13447 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13448 		    sc->sc_core_lock);
   13449 	} else {
   13450 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13451 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13452 	}
   13453 
   13454 	/*
   13455 	 * SWD Pins:
   13456 	 *
   13457 	 *	0 = Link LED (output)
   13458 	 *	1 = Loss Of Signal (input)
   13459 	 */
   13460 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13461 
   13462 	/* XXX Perhaps this is only for TBI */
   13463 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13464 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13465 
   13466 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13467 		sc->sc_ctrl &= ~CTRL_LRST;
   13468 
   13469 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13470 
   13471 #define	ADD(ss, mm, dd)							  \
   13472 do {									  \
   13473 	aprint_normal("%s%s", sep, ss);					  \
   13474 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13475 	sep = ", ";							  \
   13476 } while (/*CONSTCOND*/0)
   13477 
   13478 	aprint_normal_dev(sc->sc_dev, "");
   13479 
   13480 	if (sc->sc_type == WM_T_I354) {
   13481 		uint32_t status;
   13482 
   13483 		status = CSR_READ(sc, WMREG_STATUS);
   13484 		if (((status & STATUS_2P5_SKU) != 0)
   13485 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13486 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   13487 		} else
   13488 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   13489 	} else if (sc->sc_type == WM_T_82545) {
   13490 		/* Only 82545 is LX (XXX except SFP) */
   13491 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13492 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13493 	} else if (sc->sc_sfptype != 0) {
   13494 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13495 		switch (sc->sc_sfptype) {
   13496 		default:
   13497 		case SFF_SFP_ETH_FLAGS_1000SX:
   13498 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13499 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13500 			break;
   13501 		case SFF_SFP_ETH_FLAGS_1000LX:
   13502 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13503 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13504 			break;
   13505 		case SFF_SFP_ETH_FLAGS_1000CX:
   13506 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13507 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13508 			break;
   13509 		case SFF_SFP_ETH_FLAGS_1000T:
   13510 			ADD("1000baseT", IFM_1000_T, 0);
   13511 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13512 			break;
   13513 		case SFF_SFP_ETH_FLAGS_100FX:
   13514 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13515 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13516 			break;
   13517 		}
   13518 	} else {
   13519 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13520 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13521 	}
   13522 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13523 	aprint_normal("\n");
   13524 
   13525 #undef ADD
   13526 
   13527 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13528 }
   13529 
   13530 /*
   13531  * wm_tbi_mediachange:	[ifmedia interface function]
   13532  *
   13533  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13534  */
   13535 static int
   13536 wm_tbi_mediachange(struct ifnet *ifp)
   13537 {
   13538 	struct wm_softc *sc = ifp->if_softc;
   13539 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13540 	uint32_t status, ctrl;
   13541 	bool signal;
   13542 	int i;
   13543 
   13544 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13545 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13546 		/* XXX need some work for >= 82571 and < 82575 */
   13547 		if (sc->sc_type < WM_T_82575)
   13548 			return 0;
   13549 	}
   13550 
   13551 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13552 	    || (sc->sc_type >= WM_T_82575))
   13553 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13554 
   13555 	sc->sc_ctrl &= ~CTRL_LRST;
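          	/*
          	 * Build the TXCW (Transmit Configuration Word) advertisement
          	 * from the selected media and flow control settings.
          	 */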
   13556 	sc->sc_txcw = TXCW_ANE;
   13557 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13558 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13559 	else if (ife->ifm_media & IFM_FDX)
   13560 		sc->sc_txcw |= TXCW_FD;
   13561 	else
   13562 		sc->sc_txcw |= TXCW_HD;
   13563 
   13564 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13565 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13566 
    13567 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13568 		device_xname(sc->sc_dev), sc->sc_txcw));
   13569 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13570 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13571 	CSR_WRITE_FLUSH(sc);
   13572 	delay(1000);
   13573 
   13574 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13575 	signal = wm_tbi_havesignal(sc, ctrl);
   13576 
   13577 	DPRINTF(sc, WM_DEBUG_LINK,
   13578 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13579 
   13580 	if (signal) {
   13581 		/* Have signal; wait for the link to come up. */
   13582 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13583 			delay(10000);
   13584 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13585 				break;
   13586 		}
   13587 
   13588 		DPRINTF(sc, WM_DEBUG_LINK,
   13589 		    ("%s: i = %d after waiting for link\n",
   13590 			device_xname(sc->sc_dev), i));
   13591 
   13592 		status = CSR_READ(sc, WMREG_STATUS);
   13593 		DPRINTF(sc, WM_DEBUG_LINK,
   13594 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13595 			__PRIxBIT "\n",
   13596 			device_xname(sc->sc_dev), status, STATUS_LU));
   13597 		if (status & STATUS_LU) {
   13598 			/* Link is up. */
   13599 			DPRINTF(sc, WM_DEBUG_LINK,
   13600 			    ("%s: LINK: set media -> link up %s\n",
   13601 				device_xname(sc->sc_dev),
   13602 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13603 
   13604 			/*
    13605 			 * NOTE: The hardware may update TFCE and RFCE in
    13606 			 * CTRL automatically, so re-read it into sc->sc_ctrl.
   13607 			 */
   13608 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13609 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13610 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13611 			if (status & STATUS_FD)
   13612 				sc->sc_tctl |=
   13613 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13614 			else
   13615 				sc->sc_tctl |=
   13616 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13617 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13618 				sc->sc_fcrtl |= FCRTL_XONE;
   13619 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13620 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13621 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13622 			sc->sc_tbi_linkup = 1;
   13623 		} else {
   13624 			if (i == WM_LINKUP_TIMEOUT)
   13625 				wm_check_for_link(sc);
   13626 			/* Link is down. */
   13627 			DPRINTF(sc, WM_DEBUG_LINK,
   13628 			    ("%s: LINK: set media -> link down\n",
   13629 				device_xname(sc->sc_dev)));
   13630 			sc->sc_tbi_linkup = 0;
   13631 		}
   13632 	} else {
   13633 		DPRINTF(sc, WM_DEBUG_LINK,
   13634 		    ("%s: LINK: set media -> no signal\n",
   13635 			device_xname(sc->sc_dev)));
   13636 		sc->sc_tbi_linkup = 0;
   13637 	}
   13638 
   13639 	wm_tbi_serdes_set_linkled(sc);
   13640 
   13641 	return 0;
   13642 }
   13643 
   13644 /*
   13645  * wm_tbi_mediastatus:	[ifmedia interface function]
   13646  *
   13647  *	Get the current interface media status on a 1000BASE-X device.
   13648  */
   13649 static void
   13650 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13651 {
   13652 	struct wm_softc *sc = ifp->if_softc;
   13653 	uint32_t ctrl, status;
   13654 
   13655 	ifmr->ifm_status = IFM_AVALID;
   13656 	ifmr->ifm_active = IFM_ETHER;
   13657 
   13658 	status = CSR_READ(sc, WMREG_STATUS);
   13659 	if ((status & STATUS_LU) == 0) {
   13660 		ifmr->ifm_active |= IFM_NONE;
   13661 		return;
   13662 	}
   13663 
   13664 	ifmr->ifm_status |= IFM_ACTIVE;
   13665 	/* Only 82545 is LX */
   13666 	if (sc->sc_type == WM_T_82545)
   13667 		ifmr->ifm_active |= IFM_1000_LX;
   13668 	else
   13669 		ifmr->ifm_active |= IFM_1000_SX;
   13670 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13671 		ifmr->ifm_active |= IFM_FDX;
   13672 	else
   13673 		ifmr->ifm_active |= IFM_HDX;
   13674 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13675 	if (ctrl & CTRL_RFCE)
   13676 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13677 	if (ctrl & CTRL_TFCE)
   13678 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13679 }
   13680 
   13681 /* XXX TBI only */
   13682 static int
   13683 wm_check_for_link(struct wm_softc *sc)
   13684 {
   13685 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13686 	uint32_t rxcw;
   13687 	uint32_t ctrl;
   13688 	uint32_t status;
   13689 	bool signal;
   13690 
   13691 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13692 		device_xname(sc->sc_dev), __func__));
   13693 
   13694 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13695 		/* XXX need some work for >= 82571 */
   13696 		if (sc->sc_type >= WM_T_82571) {
   13697 			sc->sc_tbi_linkup = 1;
   13698 			return 0;
   13699 		}
   13700 	}
   13701 
   13702 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13703 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13704 	status = CSR_READ(sc, WMREG_STATUS);
   13705 	signal = wm_tbi_havesignal(sc, ctrl);
   13706 
   13707 	DPRINTF(sc, WM_DEBUG_LINK,
   13708 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13709 		device_xname(sc->sc_dev), __func__, signal,
   13710 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13711 
   13712 	/*
   13713 	 * SWDPIN   LU RXCW
   13714 	 *	0    0	  0
   13715 	 *	0    0	  1	(should not happen)
   13716 	 *	0    1	  0	(should not happen)
   13717 	 *	0    1	  1	(should not happen)
   13718 	 *	1    0	  0	Disable autonego and force linkup
   13719 	 *	1    0	  1	got /C/ but not linkup yet
   13720 	 *	1    1	  0	(linkup)
   13721 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13722 	 *
   13723 	 */
   13724 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13725 		DPRINTF(sc, WM_DEBUG_LINK,
   13726 		    ("%s: %s: force linkup and fullduplex\n",
   13727 			device_xname(sc->sc_dev), __func__));
   13728 		sc->sc_tbi_linkup = 0;
   13729 		/* Disable auto-negotiation in the TXCW register */
   13730 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13731 
   13732 		/*
   13733 		 * Force link-up and also force full-duplex.
   13734 		 *
    13735 		 * NOTE: The hardware may have updated TFCE and RFCE in
    13736 		 * CTRL automatically, so refresh sc->sc_ctrl from 'ctrl'.
   13737 		 */
   13738 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13739 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13740 	} else if (((status & STATUS_LU) != 0)
   13741 	    && ((rxcw & RXCW_C) != 0)
   13742 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13743 		sc->sc_tbi_linkup = 1;
   13744 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13745 			device_xname(sc->sc_dev), __func__));
   13746 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13747 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13748 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13749 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13750 			device_xname(sc->sc_dev), __func__));
   13751 	} else {
   13752 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13753 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13754 			status));
   13755 	}
   13756 
   13757 	return 0;
   13758 }
   13759 
   13760 /*
   13761  * wm_tbi_tick:
   13762  *
   13763  *	Check the link on TBI devices.
   13764  *	This function acts as mii_tick().
   13765  */
   13766 static void
   13767 wm_tbi_tick(struct wm_softc *sc)
   13768 {
   13769 	struct mii_data *mii = &sc->sc_mii;
   13770 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13771 	uint32_t status;
   13772 
   13773 	KASSERT(mutex_owned(sc->sc_core_lock));
   13774 
   13775 	status = CSR_READ(sc, WMREG_STATUS);
   13776 
   13777 	/* XXX is this needed? */
   13778 	(void)CSR_READ(sc, WMREG_RXCW);
   13779 	(void)CSR_READ(sc, WMREG_CTRL);
   13780 
   13781 	/* set link status */
   13782 	if ((status & STATUS_LU) == 0) {
   13783 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13784 			device_xname(sc->sc_dev)));
   13785 		sc->sc_tbi_linkup = 0;
   13786 	} else if (sc->sc_tbi_linkup == 0) {
   13787 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13788 			device_xname(sc->sc_dev),
   13789 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13790 		sc->sc_tbi_linkup = 1;
   13791 		sc->sc_tbi_serdes_ticks = 0;
   13792 	}
   13793 
   13794 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13795 		goto setled;
   13796 
   13797 	if ((status & STATUS_LU) == 0) {
   13798 		sc->sc_tbi_linkup = 0;
   13799 		/* If the timer expired, retry autonegotiation */
   13800 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13801 		    && (++sc->sc_tbi_serdes_ticks
   13802 			>= sc->sc_tbi_serdes_anegticks)) {
   13803 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13804 				device_xname(sc->sc_dev), __func__));
   13805 			sc->sc_tbi_serdes_ticks = 0;
   13806 			/*
   13807 			 * Reset the link, and let autonegotiation do
   13808 			 * its thing
   13809 			 */
   13810 			sc->sc_ctrl |= CTRL_LRST;
   13811 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13812 			CSR_WRITE_FLUSH(sc);
   13813 			delay(1000);
   13814 			sc->sc_ctrl &= ~CTRL_LRST;
   13815 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13816 			CSR_WRITE_FLUSH(sc);
   13817 			delay(1000);
   13818 			CSR_WRITE(sc, WMREG_TXCW,
   13819 			    sc->sc_txcw & ~TXCW_ANE);
   13820 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13821 		}
   13822 	}
   13823 
   13824 setled:
   13825 	wm_tbi_serdes_set_linkled(sc);
   13826 }
   13827 
   13828 /* SERDES related */
   13829 static void
   13830 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13831 {
   13832 	uint32_t reg;
   13833 
   13834 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13835 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13836 		return;
   13837 
   13838 	/* Enable PCS to turn on link */
   13839 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13840 	reg |= PCS_CFG_PCS_EN;
   13841 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13842 
   13843 	/* Power up the laser */
   13844 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13845 	reg &= ~CTRL_EXT_SWDPIN(3);
   13846 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13847 
   13848 	/* Flush the write to verify completion */
   13849 	CSR_WRITE_FLUSH(sc);
   13850 	delay(1000);
   13851 }
   13852 
   13853 static int
   13854 wm_serdes_mediachange(struct ifnet *ifp)
   13855 {
   13856 	struct wm_softc *sc = ifp->if_softc;
   13857 	bool pcs_autoneg = true; /* XXX */
   13858 	uint32_t ctrl_ext, pcs_lctl, reg;
   13859 
   13860 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13861 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13862 		return 0;
   13863 
   13864 	/* XXX Currently, this function is not called on 8257[12] */
   13865 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13866 	    || (sc->sc_type >= WM_T_82575))
   13867 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13868 
   13869 	/* Power on the sfp cage if present */
   13870 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13871 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13872 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13873 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13874 
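          	/* Force the MAC's link up; the PCS setup below handles autoneg. */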
   13875 	sc->sc_ctrl |= CTRL_SLU;
   13876 
   13877 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13878 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13879 
   13880 		reg = CSR_READ(sc, WMREG_CONNSW);
   13881 		reg |= CONNSW_ENRGSRC;
   13882 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13883 	}
   13884 
   13885 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13886 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13887 	case CTRL_EXT_LINK_MODE_SGMII:
   13888 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13889 		pcs_autoneg = true;
   13890 		/* Autoneg time out should be disabled for SGMII mode */
   13891 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13892 		break;
   13893 	case CTRL_EXT_LINK_MODE_1000KX:
   13894 		pcs_autoneg = false;
   13895 		/* FALLTHROUGH */
   13896 	default:
   13897 		if ((sc->sc_type == WM_T_82575)
   13898 		    || (sc->sc_type == WM_T_82576)) {
   13899 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13900 				pcs_autoneg = false;
   13901 		}
   13902 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13903 		    | CTRL_FRCFDX;
   13904 
   13905 		/* Set speed of 1000/Full if speed/duplex is forced */
   13906 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13907 	}
   13908 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13909 
   13910 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13911 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13912 
   13913 	if (pcs_autoneg) {
   13914 		/* Set PCS register for autoneg */
   13915 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13916 
   13917 		/* Disable force flow control for autoneg */
   13918 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13919 
   13920 		/* Configure flow control advertisement for autoneg */
   13921 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13922 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13923 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13924 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13925 	} else
   13926 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13927 
   13928 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13929 
   13930 	return 0;
   13931 }
   13932 
   13933 static void
   13934 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13935 {
   13936 	struct wm_softc *sc = ifp->if_softc;
   13937 	struct mii_data *mii = &sc->sc_mii;
   13938 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13939 	uint32_t pcs_adv, pcs_lpab, reg;
   13940 
   13941 	ifmr->ifm_status = IFM_AVALID;
   13942 	ifmr->ifm_active = IFM_ETHER;
   13943 
   13944 	/* Check PCS */
   13945 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13946 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13947 		ifmr->ifm_active |= IFM_NONE;
   13948 		sc->sc_tbi_linkup = 0;
   13949 		goto setled;
   13950 	}
   13951 
   13952 	sc->sc_tbi_linkup = 1;
   13953 	ifmr->ifm_status |= IFM_ACTIVE;
   13954 	if (sc->sc_type == WM_T_I354) {
   13955 		uint32_t status;
   13956 
   13957 		status = CSR_READ(sc, WMREG_STATUS);
   13958 		if (((status & STATUS_2P5_SKU) != 0)
   13959 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13960 			ifmr->ifm_active |= IFM_2500_KX;
   13961 		} else
   13962 			ifmr->ifm_active |= IFM_1000_KX;
   13963 	} else {
   13964 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13965 		case PCS_LSTS_SPEED_10:
   13966 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13967 			break;
   13968 		case PCS_LSTS_SPEED_100:
   13969 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13970 			break;
   13971 		case PCS_LSTS_SPEED_1000:
   13972 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13973 			break;
   13974 		default:
   13975 			device_printf(sc->sc_dev, "Unknown speed\n");
   13976 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13977 			break;
   13978 		}
   13979 	}
   13980 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13981 	if ((reg & PCS_LSTS_FDX) != 0)
   13982 		ifmr->ifm_active |= IFM_FDX;
   13983 	else
   13984 		ifmr->ifm_active |= IFM_HDX;
   13985 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13986 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13987 		/* Check flow */
   13988 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13989 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13990 			DPRINTF(sc, WM_DEBUG_LINK,
   13991 			    ("XXX LINKOK but not ACOMP\n"));
   13992 			goto setled;
   13993 		}
   13994 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13995 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13996 		DPRINTF(sc, WM_DEBUG_LINK,
   13997 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
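          		/*
          		 * Resolve flow control as in IEEE 802.3 annex 28B:
          		 * symmetric pause on both sides enables both directions;
          		 * the asymmetric combinations enable TX-only or RX-only
          		 * pause.
          		 */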
   13998 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13999 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   14000 			mii->mii_media_active |= IFM_FLOW
   14001 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   14002 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   14003 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   14004 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   14005 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   14006 			mii->mii_media_active |= IFM_FLOW
   14007 			    | IFM_ETH_TXPAUSE;
   14008 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   14009 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   14010 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   14011 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   14012 			mii->mii_media_active |= IFM_FLOW
   14013 			    | IFM_ETH_RXPAUSE;
   14014 		}
   14015 	}
   14016 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   14017 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   14018 setled:
   14019 	wm_tbi_serdes_set_linkled(sc);
   14020 }
   14021 
   14022 /*
   14023  * wm_serdes_tick:
   14024  *
   14025  *	Check the link on serdes devices.
   14026  */
   14027 static void
   14028 wm_serdes_tick(struct wm_softc *sc)
   14029 {
   14030 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   14031 	struct mii_data *mii = &sc->sc_mii;
   14032 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   14033 	uint32_t reg;
   14034 
   14035 	KASSERT(mutex_owned(sc->sc_core_lock));
   14036 
   14037 	mii->mii_media_status = IFM_AVALID;
   14038 	mii->mii_media_active = IFM_ETHER;
   14039 
   14040 	/* Check PCS */
   14041 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   14042 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   14043 		mii->mii_media_status |= IFM_ACTIVE;
   14044 		sc->sc_tbi_linkup = 1;
   14045 		sc->sc_tbi_serdes_ticks = 0;
   14046 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   14047 		if ((reg & PCS_LSTS_FDX) != 0)
   14048 			mii->mii_media_active |= IFM_FDX;
   14049 		else
   14050 			mii->mii_media_active |= IFM_HDX;
   14051 	} else {
   14052 		mii->mii_media_status |= IFM_NONE;
   14053 		sc->sc_tbi_linkup = 0;
   14054 		/* If the timer expired, retry autonegotiation */
   14055 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   14056 		    && (++sc->sc_tbi_serdes_ticks
   14057 			>= sc->sc_tbi_serdes_anegticks)) {
   14058 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   14059 				device_xname(sc->sc_dev), __func__));
   14060 			sc->sc_tbi_serdes_ticks = 0;
   14061 			/* XXX */
   14062 			wm_serdes_mediachange(ifp);
   14063 		}
   14064 	}
   14065 
   14066 	wm_tbi_serdes_set_linkled(sc);
   14067 }
   14068 
   14069 /* SFP related */
   14070 
   14071 static int
   14072 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   14073 {
   14074 	uint32_t i2ccmd;
   14075 	int i;
   14076 
   14077 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   14078 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   14079 
   14080 	/* Poll the ready bit */
   14081 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   14082 		delay(50);
   14083 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   14084 		if (i2ccmd & I2CCMD_READY)
   14085 			break;
   14086 	}
   14087 	if ((i2ccmd & I2CCMD_READY) == 0)
   14088 		return -1;
   14089 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   14090 		return -1;
   14091 
   14092 	*data = i2ccmd & 0x00ff;
   14093 
   14094 	return 0;
   14095 }
   14096 
   14097 static uint32_t
   14098 wm_sfp_get_media_type(struct wm_softc *sc)
   14099 {
   14100 	uint32_t ctrl_ext;
   14101 	uint8_t val = 0;
   14102 	int timeout = 3;
   14103 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   14104 	int rv = -1;
   14105 
   14106 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14107 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   14108 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   14109 	CSR_WRITE_FLUSH(sc);
   14110 
   14111 	/* Read SFP module data */
   14112 	while (timeout) {
   14113 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   14114 		if (rv == 0)
   14115 			break;
   14116 		delay(100*1000); /* XXX too big */
   14117 		timeout--;
   14118 	}
   14119 	if (rv != 0)
   14120 		goto out;
   14121 
   14122 	switch (val) {
   14123 	case SFF_SFP_ID_SFF:
   14124 		aprint_normal_dev(sc->sc_dev,
   14125 		    "Module/Connector soldered to board\n");
   14126 		break;
   14127 	case SFF_SFP_ID_SFP:
   14128 		sc->sc_flags |= WM_F_SFP;
   14129 		break;
   14130 	case SFF_SFP_ID_UNKNOWN:
   14131 		goto out;
   14132 	default:
   14133 		break;
   14134 	}
   14135 
   14136 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   14137 	if (rv != 0)
   14138 		goto out;
   14139 
   14140 	sc->sc_sfptype = val;
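	/* Decode the SFF Ethernet compliance codes to pick the media type. */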
   14141 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   14142 		mediatype = WM_MEDIATYPE_SERDES;
   14143 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   14144 		sc->sc_flags |= WM_F_SGMII;
   14145 		mediatype = WM_MEDIATYPE_COPPER;
   14146 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   14147 		sc->sc_flags |= WM_F_SGMII;
   14148 		mediatype = WM_MEDIATYPE_SERDES;
   14149 	} else {
   14150 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   14151 		    __func__, sc->sc_sfptype);
   14152 		sc->sc_sfptype = 0; /* XXX unknown */
   14153 	}
   14154 
   14155 out:
   14156 	/* Restore I2C interface setting */
   14157 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14158 
   14159 	return mediatype;
   14160 }
   14161 
   14162 /*
   14163  * NVM related.
 * Microwire, SPI (with/without EERD) and Flash.
   14165  */
   14166 
   14167 /* Both spi and uwire */
   14168 
   14169 /*
   14170  * wm_eeprom_sendbits:
   14171  *
   14172  *	Send a series of bits to the EEPROM.
   14173  */
   14174 static void
   14175 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   14176 {
   14177 	uint32_t reg;
   14178 	int x;
   14179 
   14180 	reg = CSR_READ(sc, WMREG_EECD);
   14181 
   14182 	for (x = nbits; x > 0; x--) {
   14183 		if (bits & (1U << (x - 1)))
   14184 			reg |= EECD_DI;
   14185 		else
   14186 			reg &= ~EECD_DI;
   14187 		CSR_WRITE(sc, WMREG_EECD, reg);
   14188 		CSR_WRITE_FLUSH(sc);
   14189 		delay(2);
   14190 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14191 		CSR_WRITE_FLUSH(sc);
   14192 		delay(2);
   14193 		CSR_WRITE(sc, WMREG_EECD, reg);
   14194 		CSR_WRITE_FLUSH(sc);
   14195 		delay(2);
   14196 	}
   14197 }
   14198 
   14199 /*
   14200  * wm_eeprom_recvbits:
   14201  *
   14202  *	Receive a series of bits from the EEPROM.
   14203  */
   14204 static void
   14205 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   14206 {
   14207 	uint32_t reg, val;
   14208 	int x;
   14209 
   14210 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14211 
   14212 	val = 0;
   14213 	for (x = nbits; x > 0; x--) {
   14214 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14215 		CSR_WRITE_FLUSH(sc);
   14216 		delay(2);
   14217 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14218 			val |= (1U << (x - 1));
   14219 		CSR_WRITE(sc, WMREG_EECD, reg);
   14220 		CSR_WRITE_FLUSH(sc);
   14221 		delay(2);
   14222 	}
   14223 	*valp = val;
   14224 }
   14225 
   14226 /* Microwire */
   14227 
   14228 /*
   14229  * wm_nvm_read_uwire:
   14230  *
   14231  *	Read a word from the EEPROM using the MicroWire protocol.
   14232  */
   14233 static int
   14234 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14235 {
   14236 	uint32_t reg, val;
   14237 	int i, rv;
   14238 
   14239 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14240 		device_xname(sc->sc_dev), __func__));
   14241 
   14242 	rv = sc->nvm.acquire(sc);
   14243 	if (rv != 0)
   14244 		return rv;
   14245 
   14246 	for (i = 0; i < wordcnt; i++) {
   14247 		/* Clear SK and DI. */
   14248 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14249 		CSR_WRITE(sc, WMREG_EECD, reg);
   14250 
   14251 		/*
   14252 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   14253 		 * and Xen.
   14254 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
   14257 		 */
   14258 		if (sc->sc_type == WM_T_82540) {
   14259 			reg |= EECD_SK;
   14260 			CSR_WRITE(sc, WMREG_EECD, reg);
   14261 			reg &= ~EECD_SK;
   14262 			CSR_WRITE(sc, WMREG_EECD, reg);
   14263 			CSR_WRITE_FLUSH(sc);
   14264 			delay(2);
   14265 		}
   14266 		/* XXX: end of workaround */
   14267 
   14268 		/* Set CHIP SELECT. */
   14269 		reg |= EECD_CS;
   14270 		CSR_WRITE(sc, WMREG_EECD, reg);
   14271 		CSR_WRITE_FLUSH(sc);
   14272 		delay(2);
   14273 
   14274 		/* Shift in the READ command. */
   14275 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14276 
   14277 		/* Shift in address. */
   14278 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14279 
   14280 		/* Shift out the data. */
   14281 		wm_eeprom_recvbits(sc, &val, 16);
   14282 		data[i] = val & 0xffff;
   14283 
   14284 		/* Clear CHIP SELECT. */
   14285 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14286 		CSR_WRITE(sc, WMREG_EECD, reg);
   14287 		CSR_WRITE_FLUSH(sc);
   14288 		delay(2);
   14289 	}
   14290 
   14291 	sc->nvm.release(sc);
   14292 	return 0;
   14293 }
   14294 
   14295 /* SPI */
   14296 
   14297 /*
   14298  * Set SPI and FLASH related information from the EECD register.
   14299  * For 82541 and 82547, the word size is taken from EEPROM.
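 * In general the word size is 1 << (EECD size field +
 * NVM_WORD_SIZE_BASE_SHIFT), clamped per device family below.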
   14300  */
   14301 static int
   14302 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14303 {
   14304 	int size;
   14305 	uint32_t reg;
   14306 	uint16_t data;
   14307 
   14308 	reg = CSR_READ(sc, WMREG_EECD);
   14309 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14310 
   14311 	/* Read the size of NVM from EECD by default */
   14312 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14313 	switch (sc->sc_type) {
   14314 	case WM_T_82541:
   14315 	case WM_T_82541_2:
   14316 	case WM_T_82547:
   14317 	case WM_T_82547_2:
   14318 		/* Set dummy value to access EEPROM */
   14319 		sc->sc_nvm_wordsize = 64;
   14320 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   14321 			aprint_error_dev(sc->sc_dev,
   14322 			    "%s: failed to read EEPROM size\n", __func__);
   14323 		}
   14324 		reg = data;
   14325 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14326 		if (size == 0)
			size = 6; /* 1 << 6 = 64 words */
   14328 		else
   14329 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14330 		break;
   14331 	case WM_T_80003:
   14332 	case WM_T_82571:
   14333 	case WM_T_82572:
   14334 	case WM_T_82573: /* SPI case */
   14335 	case WM_T_82574: /* SPI case */
   14336 	case WM_T_82583: /* SPI case */
   14337 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14338 		if (size > 14)
   14339 			size = 14;
   14340 		break;
   14341 	case WM_T_82575:
   14342 	case WM_T_82576:
   14343 	case WM_T_82580:
   14344 	case WM_T_I350:
   14345 	case WM_T_I354:
   14346 	case WM_T_I210:
   14347 	case WM_T_I211:
   14348 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14349 		if (size > 15)
   14350 			size = 15;
   14351 		break;
	default:
		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   14357 	}
   14358 
   14359 	sc->sc_nvm_wordsize = 1 << size;
   14360 
   14361 	return 0;
   14362 }
   14363 
   14364 /*
   14365  * wm_nvm_ready_spi:
   14366  *
   14367  *	Wait for a SPI EEPROM to be ready for commands.
   14368  */
   14369 static int
   14370 wm_nvm_ready_spi(struct wm_softc *sc)
   14371 {
   14372 	uint32_t val;
   14373 	int usec;
   14374 
   14375 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14376 		device_xname(sc->sc_dev), __func__));
   14377 
   14378 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14379 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14380 		wm_eeprom_recvbits(sc, &val, 8);
   14381 		if ((val & SPI_SR_RDY) == 0)
   14382 			break;
   14383 	}
   14384 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   14386 		return -1;
   14387 	}
   14388 	return 0;
   14389 }
   14390 
   14391 /*
   14392  * wm_nvm_read_spi:
   14393  *
   14394  *	Read a work from the EEPROM using the SPI protocol.
   14395  */
   14396 static int
   14397 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14398 {
   14399 	uint32_t reg, val;
   14400 	int i;
   14401 	uint8_t opc;
   14402 	int rv;
   14403 
   14404 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14405 		device_xname(sc->sc_dev), __func__));
   14406 
   14407 	rv = sc->nvm.acquire(sc);
   14408 	if (rv != 0)
   14409 		return rv;
   14410 
   14411 	/* Clear SK and CS. */
   14412 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14413 	CSR_WRITE(sc, WMREG_EECD, reg);
   14414 	CSR_WRITE_FLUSH(sc);
   14415 	delay(2);
   14416 
   14417 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14418 		goto out;
   14419 
   14420 	/* Toggle CS to flush commands. */
   14421 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14422 	CSR_WRITE_FLUSH(sc);
   14423 	delay(2);
   14424 	CSR_WRITE(sc, WMREG_EECD, reg);
   14425 	CSR_WRITE_FLUSH(sc);
   14426 	delay(2);
   14427 
   14428 	opc = SPI_OPC_READ;
   14429 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14430 		opc |= SPI_OPC_A8;
   14431 
   14432 	wm_eeprom_sendbits(sc, opc, 8);
   14433 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14434 
   14435 	for (i = 0; i < wordcnt; i++) {
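		/*
		 * The byte at the lower address shifts out first and lands
		 * in the upper half of 'val'; swap the two bytes to form
		 * the 16-bit word.
		 */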
   14436 		wm_eeprom_recvbits(sc, &val, 16);
   14437 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14438 	}
   14439 
   14440 	/* Raise CS and clear SK. */
   14441 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14442 	CSR_WRITE(sc, WMREG_EECD, reg);
   14443 	CSR_WRITE_FLUSH(sc);
   14444 	delay(2);
   14445 
   14446 out:
   14447 	sc->nvm.release(sc);
   14448 	return rv;
   14449 }
   14450 
/* Reading via the EERD (EEPROM Read) register */
   14452 
   14453 static int
   14454 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14455 {
   14456 	uint32_t attempts = 100000;
   14457 	uint32_t i, reg = 0;
   14458 	int32_t done = -1;
   14459 
   14460 	for (i = 0; i < attempts; i++) {
   14461 		reg = CSR_READ(sc, rw);
   14462 
   14463 		if (reg & EERD_DONE) {
   14464 			done = 0;
   14465 			break;
   14466 		}
   14467 		delay(5);
   14468 	}
   14469 
   14470 	return done;
   14471 }
   14472 
   14473 static int
   14474 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14475 {
   14476 	int i, eerd = 0;
   14477 	int rv;
   14478 
   14479 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14480 		device_xname(sc->sc_dev), __func__));
   14481 
   14482 	rv = sc->nvm.acquire(sc);
   14483 	if (rv != 0)
   14484 		return rv;
   14485 
   14486 	for (i = 0; i < wordcnt; i++) {
   14487 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14488 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14489 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14490 		if (rv != 0) {
   14491 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   14492 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   14493 			break;
   14494 		}
   14495 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14496 	}
   14497 
   14498 	sc->nvm.release(sc);
   14499 	return rv;
   14500 }
   14501 
   14502 /* Flash */
   14503 
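/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank holds the valid NVM image by examining
 *	the signature byte of each bank.
 */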
   14504 static int
   14505 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14506 {
   14507 	uint32_t eecd;
   14508 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14509 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14510 	uint32_t nvm_dword = 0;
   14511 	uint8_t sig_byte = 0;
   14512 	int rv;
   14513 
   14514 	switch (sc->sc_type) {
   14515 	case WM_T_PCH_SPT:
   14516 	case WM_T_PCH_CNP:
   14517 	case WM_T_PCH_TGP:
   14518 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14519 		act_offset = ICH_NVM_SIG_WORD * 2;
   14520 
   14521 		/* Set bank to 0 in case flash read fails. */
   14522 		*bank = 0;
   14523 
   14524 		/* Check bank 0 */
   14525 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14526 		if (rv != 0)
   14527 			return rv;
   14528 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14529 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14530 			*bank = 0;
   14531 			return 0;
   14532 		}
   14533 
   14534 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14538 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14539 			*bank = 1;
   14540 			return 0;
   14541 		}
   14542 		aprint_error_dev(sc->sc_dev,
   14543 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14544 		return -1;
   14545 	case WM_T_ICH8:
   14546 	case WM_T_ICH9:
   14547 		eecd = CSR_READ(sc, WMREG_EECD);
   14548 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14549 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14550 			return 0;
   14551 		}
   14552 		/* FALLTHROUGH */
   14553 	default:
   14554 		/* Default to 0 */
   14555 		*bank = 0;
   14556 
   14557 		/* Check bank 0 */
   14558 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14559 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14560 			*bank = 0;
   14561 			return 0;
   14562 		}
   14563 
   14564 		/* Check bank 1 */
   14565 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14566 		    &sig_byte);
   14567 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14568 			*bank = 1;
   14569 			return 0;
   14570 		}
   14571 	}
   14572 
   14573 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14574 		device_xname(sc->sc_dev)));
   14575 	return -1;
   14576 }
   14577 
   14578 /******************************************************************************
   14579  * This function does initial flash setup so that a new read/write/erase cycle
   14580  * can be started.
   14581  *
   14582  * sc - The pointer to the hw structure
   14583  ****************************************************************************/
   14584 static int32_t
   14585 wm_ich8_cycle_init(struct wm_softc *sc)
   14586 {
   14587 	uint16_t hsfsts;
   14588 	int32_t error = 1;
   14589 	int32_t i     = 0;
   14590 
   14591 	if (sc->sc_type >= WM_T_PCH_SPT)
   14592 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14593 	else
   14594 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14595 
	/* Maybe check the Flash Descriptor Valid bit in Hw status */
   14597 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14598 		return error;
   14599 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   14602 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14603 
   14604 	if (sc->sc_type >= WM_T_PCH_SPT)
   14605 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14606 	else
   14607 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14608 
   14609 	/*
   14610 	 * Either we should have a hardware SPI cycle in progress bit to check
   14611 	 * against, in order to start a new cycle or FDONE bit should be
   14612 	 * changed in the hardware so that it is 1 after hardware reset, which
   14613 	 * can then be used as an indication whether a cycle is in progress or
   14614 	 * has been completed .. we should also have some software semaphore
   14615 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   14616 	 * threads access to those bits can be sequentiallized or a way so that
   14617 	 * 2 threads don't start the cycle at the same time
   14618 	 */
   14619 
   14620 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14621 		/*
   14622 		 * There is no cycle running at present, so we can start a
   14623 		 * cycle
   14624 		 */
   14625 
   14626 		/* Begin by setting Flash Cycle Done. */
   14627 		hsfsts |= HSFSTS_DONE;
   14628 		if (sc->sc_type >= WM_T_PCH_SPT)
   14629 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14630 			    hsfsts & 0xffffUL);
   14631 		else
   14632 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14633 		error = 0;
   14634 	} else {
   14635 		/*
		 * Otherwise poll for some time so the current cycle has a
   14637 		 * chance to end before giving up.
   14638 		 */
   14639 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14640 			if (sc->sc_type >= WM_T_PCH_SPT)
   14641 				hsfsts = ICH8_FLASH_READ32(sc,
   14642 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14643 			else
   14644 				hsfsts = ICH8_FLASH_READ16(sc,
   14645 				    ICH_FLASH_HSFSTS);
   14646 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14647 				error = 0;
   14648 				break;
   14649 			}
   14650 			delay(1);
   14651 		}
   14652 		if (error == 0) {
   14653 			/*
   14654 			 * Successful in waiting for previous cycle to timeout,
   14655 			 * now set the Flash Cycle Done.
   14656 			 */
   14657 			hsfsts |= HSFSTS_DONE;
   14658 			if (sc->sc_type >= WM_T_PCH_SPT)
   14659 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14660 				    hsfsts & 0xffffUL);
   14661 			else
   14662 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14663 				    hsfsts);
   14664 		}
   14665 	}
   14666 	return error;
   14667 }
   14668 
   14669 /******************************************************************************
   14670  * This function starts a flash cycle and waits for its completion
   14671  *
   14672  * sc - The pointer to the hw structure
   14673  ****************************************************************************/
   14674 static int32_t
   14675 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14676 {
   14677 	uint16_t hsflctl;
   14678 	uint16_t hsfsts;
   14679 	int32_t error = 1;
   14680 	uint32_t i = 0;
   14681 
   14682 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14683 	if (sc->sc_type >= WM_T_PCH_SPT)
   14684 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14685 	else
   14686 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14687 	hsflctl |= HSFCTL_GO;
   14688 	if (sc->sc_type >= WM_T_PCH_SPT)
   14689 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14690 		    (uint32_t)hsflctl << 16);
   14691 	else
   14692 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14693 
   14694 	/* Wait till FDONE bit is set to 1 */
   14695 	do {
   14696 		if (sc->sc_type >= WM_T_PCH_SPT)
   14697 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14698 			    & 0xffffUL;
   14699 		else
   14700 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14701 		if (hsfsts & HSFSTS_DONE)
   14702 			break;
   14703 		delay(1);
   14704 		i++;
   14705 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14707 		error = 0;
   14708 
   14709 	return error;
   14710 }
   14711 
   14712 /******************************************************************************
   14713  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14714  *
   14715  * sc - The pointer to the hw structure
   14716  * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte, 2=word, 4=dword
   14718  * data - Pointer to the word to store the value read.
   14719  *****************************************************************************/
   14720 static int32_t
   14721 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14722     uint32_t size, uint32_t *data)
   14723 {
   14724 	uint16_t hsfsts;
   14725 	uint16_t hsflctl;
   14726 	uint32_t flash_linear_address;
   14727 	uint32_t flash_data = 0;
   14728 	int32_t error = 1;
   14729 	int32_t count = 0;
   14730 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14733 		return error;
   14734 
   14735 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14736 	    sc->sc_ich8_flash_base;
   14737 
   14738 	do {
   14739 		delay(1);
		/* Steps: init the cycle, program it, set the address, go */
   14741 		error = wm_ich8_cycle_init(sc);
   14742 		if (error)
   14743 			break;
   14744 
   14745 		if (sc->sc_type >= WM_T_PCH_SPT)
   14746 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14747 			    >> 16;
   14748 		else
   14749 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The BCOUNT field holds the transfer size minus one. */
   14751 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14752 		    & HSFCTL_BCOUNT_MASK;
   14753 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14754 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14755 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
   14758 			 */
   14759 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14760 			    (uint32_t)hsflctl << 16);
   14761 		} else
   14762 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14763 
   14764 		/*
   14765 		 * Write the last 24 bits of index into Flash Linear address
   14766 		 * field in Flash Address
   14767 		 */
   14768 		/* TODO: TBD maybe check the index against the size of flash */
   14769 
   14770 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14771 
   14772 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14773 
   14774 		/*
   14775 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   14776 		 * the whole sequence a few more times, else read in (shift in)
   14777 		 * the Flash Data0, the order is least significant byte first
   14778 		 * msb to lsb
   14779 		 */
   14780 		if (error == 0) {
   14781 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14782 			if (size == 1)
   14783 				*data = (uint8_t)(flash_data & 0x000000FF);
   14784 			else if (size == 2)
   14785 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14786 			else if (size == 4)
   14787 				*data = (uint32_t)flash_data;
   14788 			break;
   14789 		} else {
   14790 			/*
   14791 			 * If we've gotten here, then things are probably
   14792 			 * completely hosed, but if the error condition is
   14793 			 * detected, it won't hurt to give it another try...
   14794 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14795 			 */
   14796 			if (sc->sc_type >= WM_T_PCH_SPT)
   14797 				hsfsts = ICH8_FLASH_READ32(sc,
   14798 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14799 			else
   14800 				hsfsts = ICH8_FLASH_READ16(sc,
   14801 				    ICH_FLASH_HSFSTS);
   14802 
   14803 			if (hsfsts & HSFSTS_ERR) {
   14804 				/* Repeat for some time before giving up. */
   14805 				continue;
   14806 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14807 				break;
   14808 		}
   14809 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14810 
   14811 	return error;
   14812 }
   14813 
   14814 /******************************************************************************
   14815  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14816  *
   14817  * sc - pointer to wm_hw structure
   14818  * index - The index of the byte to read.
   14819  * data - Pointer to a byte to store the value read.
   14820  *****************************************************************************/
   14821 static int32_t
   14822 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14823 {
   14824 	int32_t status;
   14825 	uint32_t word = 0;
   14826 
   14827 	status = wm_read_ich8_data(sc, index, 1, &word);
   14828 	if (status == 0)
   14829 		*data = (uint8_t)word;
   14830 	else
   14831 		*data = 0;
   14832 
   14833 	return status;
   14834 }
   14835 
   14836 /******************************************************************************
   14837  * Reads a word from the NVM using the ICH8 flash access registers.
   14838  *
   14839  * sc - pointer to wm_hw structure
   14840  * index - The starting byte index of the word to read.
   14841  * data - Pointer to a word to store the value read.
   14842  *****************************************************************************/
   14843 static int32_t
   14844 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14845 {
   14846 	int32_t status;
   14847 	uint32_t word = 0;
   14848 
   14849 	status = wm_read_ich8_data(sc, index, 2, &word);
   14850 	if (status == 0)
   14851 		*data = (uint16_t)word;
   14852 	else
   14853 		*data = 0;
   14854 
   14855 	return status;
   14856 }
   14857 
   14858 /******************************************************************************
   14859  * Reads a dword from the NVM using the ICH8 flash access registers.
   14860  *
   14861  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   14864  *****************************************************************************/
   14865 static int32_t
   14866 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14867 {
   14868 	int32_t status;
   14869 
   14870 	status = wm_read_ich8_data(sc, index, 4, data);
   14871 	return status;
   14872 }
   14873 
   14874 /******************************************************************************
   14875  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14876  * register.
   14877  *
   14878  * sc - Struct containing variables accessed by shared code
   14879  * offset - offset of word in the EEPROM to read
   14880  * data - word read from the EEPROM
   14881  * words - number of words to read
   14882  *****************************************************************************/
   14883 static int
   14884 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14885 {
   14886 	int rv;
   14887 	uint32_t flash_bank = 0;
   14888 	uint32_t act_offset = 0;
   14889 	uint32_t bank_offset = 0;
   14890 	uint16_t word = 0;
   14891 	uint16_t i = 0;
   14892 
   14893 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14894 		device_xname(sc->sc_dev), __func__));
   14895 
   14896 	rv = sc->nvm.acquire(sc);
   14897 	if (rv != 0)
   14898 		return rv;
   14899 
   14900 	/*
   14901 	 * We need to know which is the valid flash bank.  In the event
   14902 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14903 	 * managing flash_bank. So it cannot be trusted and needs
   14904 	 * to be updated with each read.
   14905 	 */
   14906 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14907 	if (rv) {
   14908 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14909 			device_xname(sc->sc_dev)));
   14910 		flash_bank = 0;
   14911 	}
   14912 
   14913 	/*
   14914 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14915 	 * size
   14916 	 */
   14917 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14918 
   14919 	for (i = 0; i < words; i++) {
   14920 		/* The NVM part needs a byte offset, hence * 2 */
   14921 		act_offset = bank_offset + ((offset + i) * 2);
   14922 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14923 		if (rv) {
   14924 			aprint_error_dev(sc->sc_dev,
   14925 			    "%s: failed to read NVM\n", __func__);
   14926 			break;
   14927 		}
   14928 		data[i] = word;
   14929 	}
   14930 
   14931 	sc->nvm.release(sc);
   14932 	return rv;
   14933 }
   14934 
   14935 /******************************************************************************
   14936  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14937  * register.
   14938  *
   14939  * sc - Struct containing variables accessed by shared code
   14940  * offset - offset of word in the EEPROM to read
   14941  * data - word read from the EEPROM
   14942  * words - number of words to read
   14943  *****************************************************************************/
   14944 static int
   14945 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14946 {
   14947 	int	 rv;
   14948 	uint32_t flash_bank = 0;
   14949 	uint32_t act_offset = 0;
   14950 	uint32_t bank_offset = 0;
   14951 	uint32_t dword = 0;
   14952 	uint16_t i = 0;
   14953 
   14954 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14955 		device_xname(sc->sc_dev), __func__));
   14956 
   14957 	rv = sc->nvm.acquire(sc);
   14958 	if (rv != 0)
   14959 		return rv;
   14960 
   14961 	/*
   14962 	 * We need to know which is the valid flash bank.  In the event
   14963 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14964 	 * managing flash_bank. So it cannot be trusted and needs
   14965 	 * to be updated with each read.
   14966 	 */
   14967 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14968 	if (rv) {
   14969 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14970 			device_xname(sc->sc_dev)));
   14971 		flash_bank = 0;
   14972 	}
   14973 
   14974 	/*
   14975 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14976 	 * size
   14977 	 */
   14978 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14979 
   14980 	for (i = 0; i < words; i++) {
   14981 		/* The NVM part needs a byte offset, hence * 2 */
   14982 		act_offset = bank_offset + ((offset + i) * 2);
   14983 		/* but we must read dword aligned, so mask ... */
   14984 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14985 		if (rv) {
   14986 			aprint_error_dev(sc->sc_dev,
   14987 			    "%s: failed to read NVM\n", __func__);
   14988 			break;
   14989 		}
   14990 		/* ... and pick out low or high word */
   14991 		if ((act_offset & 0x2) == 0)
   14992 			data[i] = (uint16_t)(dword & 0xFFFF);
   14993 		else
   14994 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14995 	}
   14996 
   14997 	sc->nvm.release(sc);
   14998 	return rv;
   14999 }
   15000 
   15001 /* iNVM */
   15002 
   15003 static int
   15004 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   15005 {
	int32_t	 rv = -1;	/* Fail unless the word is found below */
   15007 	uint32_t invm_dword;
   15008 	uint16_t i;
   15009 	uint8_t record_type, word_address;
   15010 
   15011 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15012 		device_xname(sc->sc_dev), __func__));
   15013 
   15014 	for (i = 0; i < INVM_SIZE; i++) {
   15015 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   15016 		/* Get record type */
   15017 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   15018 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   15019 			break;
   15020 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   15021 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   15022 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   15023 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   15024 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   15025 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   15026 			if (word_address == address) {
   15027 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   15028 				rv = 0;
   15029 				break;
   15030 			}
   15031 		}
   15032 	}
   15033 
   15034 	return rv;
   15035 }
   15036 
   15037 static int
   15038 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   15039 {
   15040 	int i, rv;
   15041 
   15042 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15043 		device_xname(sc->sc_dev), __func__));
   15044 
   15045 	rv = sc->nvm.acquire(sc);
   15046 	if (rv != 0)
   15047 		return rv;
   15048 
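	/*
	 * Only some words are autoloaded from the iNVM; words that are
	 * absent get the documented I211 default values, and unmapped
	 * offsets read back as NVM_RESERVED_WORD.
	 */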
   15049 	for (i = 0; i < words; i++) {
   15050 		switch (offset + i) {
   15051 		case NVM_OFF_MACADDR:
   15052 		case NVM_OFF_MACADDR1:
   15053 		case NVM_OFF_MACADDR2:
   15054 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   15055 			if (rv != 0) {
   15056 				data[i] = 0xffff;
   15057 				rv = -1;
   15058 			}
   15059 			break;
   15060 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   15061 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15062 			if (rv != 0) {
   15063 				*data = INVM_DEFAULT_AL;
   15064 				rv = 0;
   15065 			}
   15066 			break;
   15067 		case NVM_OFF_CFG2:
   15068 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15069 			if (rv != 0) {
   15070 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   15071 				rv = 0;
   15072 			}
   15073 			break;
   15074 		case NVM_OFF_CFG4:
   15075 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15076 			if (rv != 0) {
   15077 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   15078 				rv = 0;
   15079 			}
   15080 			break;
   15081 		case NVM_OFF_LED_1_CFG:
   15082 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15083 			if (rv != 0) {
   15084 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   15085 				rv = 0;
   15086 			}
   15087 			break;
   15088 		case NVM_OFF_LED_0_2_CFG:
   15089 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15090 			if (rv != 0) {
   15091 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   15092 				rv = 0;
   15093 			}
   15094 			break;
   15095 		case NVM_OFF_ID_LED_SETTINGS:
   15096 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15097 			if (rv != 0) {
   15098 				*data = ID_LED_RESERVED_FFFF;
   15099 				rv = 0;
   15100 			}
   15101 			break;
   15102 		default:
   15103 			DPRINTF(sc, WM_DEBUG_NVM,
   15104 			    ("NVM word 0x%02x is not mapped.\n", offset));
   15105 			*data = NVM_RESERVED_WORD;
   15106 			break;
   15107 		}
   15108 	}
   15109 
   15110 	sc->nvm.release(sc);
   15111 	return rv;
   15112 }
   15113 
/* Locking, NVM type detection, checksum/version validation and reading */
   15115 
   15116 static int
   15117 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   15118 {
   15119 	uint32_t eecd = 0;
   15120 
   15121 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   15122 	    || sc->sc_type == WM_T_82583) {
   15123 		eecd = CSR_READ(sc, WMREG_EECD);
   15124 
   15125 		/* Isolate bits 15 & 16 */
   15126 		eecd = ((eecd >> 15) & 0x03);
   15127 
   15128 		/* If both bits are set, device is Flash type */
   15129 		if (eecd == 0x03)
   15130 			return 0;
   15131 	}
   15132 	return 1;
   15133 }
   15134 
   15135 static int
   15136 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   15137 {
   15138 	uint32_t eec;
   15139 
   15140 	eec = CSR_READ(sc, WMREG_EEC);
   15141 	if ((eec & EEC_FLASH_DETECTED) != 0)
   15142 		return 1;
   15143 
   15144 	return 0;
   15145 }
   15146 
   15147 /*
   15148  * wm_nvm_validate_checksum
   15149  *
   15150  * The checksum is defined as the sum of the first 64 (16 bit) words.
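 * The sum, including the stored checksum word, should equal NVM_CHECKSUM
 * (0xBABA).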
   15151  */
   15152 static int
   15153 wm_nvm_validate_checksum(struct wm_softc *sc)
   15154 {
   15155 	uint16_t checksum;
   15156 	uint16_t eeprom_data;
   15157 #ifdef WM_DEBUG
   15158 	uint16_t csum_wordaddr, valid_checksum;
   15159 #endif
   15160 	int i;
   15161 
   15162 	checksum = 0;
   15163 
   15164 	/* Don't check for I211 */
   15165 	if (sc->sc_type == WM_T_I211)
   15166 		return 0;
   15167 
   15168 #ifdef WM_DEBUG
   15169 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
   15170 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
   15171 		csum_wordaddr = NVM_OFF_COMPAT;
   15172 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   15173 	} else {
   15174 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   15175 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   15176 	}
   15177 
   15178 	/* Dump EEPROM image for debug */
   15179 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15180 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15181 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   15182 		/* XXX PCH_SPT? */
   15183 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   15184 		if ((eeprom_data & valid_checksum) == 0)
   15185 			DPRINTF(sc, WM_DEBUG_NVM,
   15186 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   15187 				device_xname(sc->sc_dev), eeprom_data,
   15188 				valid_checksum));
   15189 	}
   15190 
   15191 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   15192 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   15193 		for (i = 0; i < NVM_SIZE; i++) {
   15194 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15195 				printf("XXXX ");
   15196 			else
   15197 				printf("%04hx ", eeprom_data);
   15198 			if (i % 8 == 7)
   15199 				printf("\n");
   15200 		}
   15201 	}
   15202 
   15203 #endif /* WM_DEBUG */
   15204 
   15205 	for (i = 0; i < NVM_SIZE; i++) {
   15206 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15207 			return -1;
   15208 		checksum += eeprom_data;
   15209 	}
   15210 
	/* Report (under WM_DEBUG) but don't treat a mismatch as fatal. */
	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15212 #ifdef WM_DEBUG
   15213 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15214 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15215 #endif
   15216 	}
   15217 
   15218 	return 0;
   15219 }
   15220 
   15221 static void
   15222 wm_nvm_version_invm(struct wm_softc *sc)
   15223 {
   15224 	uint32_t dword;
   15225 
   15226 	/*
   15227 	 * Linux's code to decode version is very strange, so we don't
   15228 	 * obey that algorithm and just use word 61 as the document.
   15229 	 * Perhaps it's not perfect though...
   15230 	 *
   15231 	 * Example:
   15232 	 *
   15233 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15234 	 */
   15235 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15236 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15237 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15238 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15239 }
   15240 
   15241 static void
   15242 wm_nvm_version(struct wm_softc *sc)
   15243 {
   15244 	uint16_t major, minor, build, patch;
   15245 	uint16_t uid0, uid1;
   15246 	uint16_t nvm_data;
   15247 	uint16_t off;
   15248 	bool check_version = false;
   15249 	bool check_optionrom = false;
   15250 	bool have_build = false;
   15251 	bool have_uid = true;
   15252 
   15253 	/*
   15254 	 * Version format:
   15255 	 *
   15256 	 * XYYZ
   15257 	 * X0YZ
   15258 	 * X0YY
   15259 	 *
   15260 	 * Example:
   15261 	 *
   15262 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15263 	 *	82571	0x50a6	5.10.6?
   15264 	 *	82572	0x506a	5.6.10?
   15265 	 *	82572EI	0x5069	5.6.9?
   15266 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15267 	 *		0x2013	2.1.3?
   15268 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15269 	 * ICH8+82567	0x0040	0.4.0?
   15270 	 * ICH9+82566	0x1040	1.4.0?
   15271 	 *ICH10+82567	0x0043	0.4.3?
   15272 	 *  PCH+82577	0x00c1	0.12.1?
   15273 	 * PCH2+82579	0x00d3	0.13.3?
   15274 	 *		0x00d4	0.13.4?
   15275 	 *  LPT+I218	0x0023	0.2.3?
   15276 	 *  SPT+I219	0x0084	0.8.4?
   15277 	 *  CNP+I219	0x0054	0.5.4?
   15278 	 */
   15279 
   15280 	/*
   15281 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   15284 	 */
   15285 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15286 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15287 		have_uid = false;
   15288 
   15289 	switch (sc->sc_type) {
   15290 	case WM_T_82571:
   15291 	case WM_T_82572:
   15292 	case WM_T_82574:
   15293 	case WM_T_82583:
   15294 		check_version = true;
   15295 		check_optionrom = true;
   15296 		have_build = true;
   15297 		break;
   15298 	case WM_T_ICH8:
   15299 	case WM_T_ICH9:
   15300 	case WM_T_ICH10:
   15301 	case WM_T_PCH:
   15302 	case WM_T_PCH2:
   15303 	case WM_T_PCH_LPT:
   15304 	case WM_T_PCH_SPT:
   15305 	case WM_T_PCH_CNP:
   15306 	case WM_T_PCH_TGP:
   15307 		check_version = true;
   15308 		have_build = true;
   15309 		have_uid = false;
   15310 		break;
   15311 	case WM_T_82575:
   15312 	case WM_T_82576:
   15313 	case WM_T_82580:
   15314 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15315 			check_version = true;
   15316 		break;
   15317 	case WM_T_I211:
   15318 		wm_nvm_version_invm(sc);
   15319 		have_uid = false;
   15320 		goto printver;
   15321 	case WM_T_I210:
   15322 		if (!wm_nvm_flash_presence_i210(sc)) {
   15323 			wm_nvm_version_invm(sc);
   15324 			have_uid = false;
   15325 			goto printver;
   15326 		}
   15327 		/* FALLTHROUGH */
   15328 	case WM_T_I350:
   15329 	case WM_T_I354:
   15330 		check_version = true;
   15331 		check_optionrom = true;
   15332 		break;
   15333 	default:
   15334 		return;
   15335 	}
   15336 	if (check_version
   15337 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15338 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15339 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15340 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15341 			build = nvm_data & NVM_BUILD_MASK;
   15342 			have_build = true;
   15343 		} else
   15344 			minor = nvm_data & 0x00ff;
   15345 
		/* Convert the BCD-coded minor to decimal (e.g. 0x10 -> 10) */
   15347 		minor = (minor / 16) * 10 + (minor % 16);
   15348 		sc->sc_nvm_ver_major = major;
   15349 		sc->sc_nvm_ver_minor = minor;
   15350 
   15351 printver:
   15352 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15353 		    sc->sc_nvm_ver_minor);
   15354 		if (have_build) {
   15355 			sc->sc_nvm_ver_build = build;
   15356 			aprint_verbose(".%d", build);
   15357 		}
   15358 	}
   15359 
	/* Assume the Option ROM area is above NVM_SIZE */
   15361 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15362 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15363 		/* Option ROM Version */
   15364 		if ((off != 0x0000) && (off != 0xffff)) {
   15365 			int rv;
   15366 
   15367 			off += NVM_COMBO_VER_OFF;
   15368 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15369 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15370 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15371 			    && (uid1 != 0) && (uid1 != 0xffff)) {
				/* 16 bits */
   15373 				major = uid0 >> 8;
   15374 				build = (uid0 << 8) | (uid1 >> 8);
   15375 				patch = uid1 & 0x00ff;
   15376 				aprint_verbose(", option ROM Version %d.%d.%d",
   15377 				    major, build, patch);
   15378 			}
   15379 		}
   15380 	}
   15381 
   15382 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15383 		aprint_verbose(", Image Unique ID %08x",
   15384 		    ((uint32_t)uid1 << 16) | uid0);
   15385 }
   15386 
   15387 /*
   15388  * wm_nvm_read:
   15389  *
   15390  *	Read data from the serial EEPROM.
   15391  */
   15392 static int
   15393 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15394 {
   15395 	int rv;
   15396 
   15397 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15398 		device_xname(sc->sc_dev), __func__));
   15399 
   15400 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15401 		return -1;
   15402 
   15403 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15404 
   15405 	return rv;
   15406 }
   15407 
   15408 /*
   15409  * Hardware semaphores.
 * Very complex...
   15411  */
   15412 
   15413 static int
   15414 wm_get_null(struct wm_softc *sc)
   15415 {
   15416 
   15417 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15418 		device_xname(sc->sc_dev), __func__));
   15419 	return 0;
   15420 }
   15421 
   15422 static void
   15423 wm_put_null(struct wm_softc *sc)
   15424 {
   15425 
   15426 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15427 		device_xname(sc->sc_dev), __func__));
   15428 	return;
   15429 }
   15430 
   15431 static int
   15432 wm_get_eecd(struct wm_softc *sc)
   15433 {
   15434 	uint32_t reg;
   15435 	int x;
   15436 
   15437 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15438 		device_xname(sc->sc_dev), __func__));
   15439 
   15440 	reg = CSR_READ(sc, WMREG_EECD);
   15441 
   15442 	/* Request EEPROM access. */
   15443 	reg |= EECD_EE_REQ;
   15444 	CSR_WRITE(sc, WMREG_EECD, reg);
   15445 
   15446 	/* ..and wait for it to be granted. */
   15447 	for (x = 0; x < 1000; x++) {
   15448 		reg = CSR_READ(sc, WMREG_EECD);
   15449 		if (reg & EECD_EE_GNT)
   15450 			break;
   15451 		delay(5);
   15452 	}
   15453 	if ((reg & EECD_EE_GNT) == 0) {
   15454 		aprint_error_dev(sc->sc_dev,
   15455 		    "could not acquire EEPROM GNT\n");
   15456 		reg &= ~EECD_EE_REQ;
   15457 		CSR_WRITE(sc, WMREG_EECD, reg);
   15458 		return -1;
   15459 	}
   15460 
   15461 	return 0;
   15462 }
   15463 
   15464 static void
   15465 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15466 {
   15467 
   15468 	*eecd |= EECD_SK;
   15469 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15470 	CSR_WRITE_FLUSH(sc);
   15471 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15472 		delay(1);
   15473 	else
   15474 		delay(50);
   15475 }
   15476 
   15477 static void
   15478 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15479 {
   15480 
   15481 	*eecd &= ~EECD_SK;
   15482 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15483 	CSR_WRITE_FLUSH(sc);
   15484 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15485 		delay(1);
   15486 	else
   15487 		delay(50);
   15488 }
   15489 
   15490 static void
   15491 wm_put_eecd(struct wm_softc *sc)
   15492 {
   15493 	uint32_t reg;
   15494 
   15495 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15496 		device_xname(sc->sc_dev), __func__));
   15497 
   15498 	/* Stop nvm */
   15499 	reg = CSR_READ(sc, WMREG_EECD);
   15500 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15501 		/* Pull CS high */
   15502 		reg |= EECD_CS;
   15503 		wm_nvm_eec_clock_lower(sc, &reg);
   15504 	} else {
   15505 		/* CS on Microwire is active-high */
   15506 		reg &= ~(EECD_CS | EECD_DI);
   15507 		CSR_WRITE(sc, WMREG_EECD, reg);
   15508 		wm_nvm_eec_clock_raise(sc, &reg);
   15509 		wm_nvm_eec_clock_lower(sc, &reg);
   15510 	}
   15511 
   15512 	reg = CSR_READ(sc, WMREG_EECD);
   15513 	reg &= ~EECD_EE_REQ;
   15514 	CSR_WRITE(sc, WMREG_EECD, reg);
   15515 
   15516 	return;
   15517 }
   15518 
   15519 /*
   15520  * Get hardware semaphore.
   15521  * Same as e1000_get_hw_semaphore_generic()
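 * Acquisition is two-step: first wait for SMBI to read back clear, then
 * set SWESMBI and read it back to confirm that software (not firmware)
 * owns the semaphore.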
   15522  */
   15523 static int
   15524 wm_get_swsm_semaphore(struct wm_softc *sc)
   15525 {
   15526 	int32_t timeout;
   15527 	uint32_t swsm;
   15528 
   15529 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15530 		device_xname(sc->sc_dev), __func__));
   15531 	KASSERT(sc->sc_nvm_wordsize > 0);
   15532 
   15533 retry:
   15534 	/* Get the SW semaphore. */
   15535 	timeout = sc->sc_nvm_wordsize + 1;
   15536 	while (timeout) {
   15537 		swsm = CSR_READ(sc, WMREG_SWSM);
   15538 
   15539 		if ((swsm & SWSM_SMBI) == 0)
   15540 			break;
   15541 
   15542 		delay(50);
   15543 		timeout--;
   15544 	}
   15545 
   15546 	if (timeout == 0) {
   15547 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15548 			/*
   15549 			 * In rare circumstances, the SW semaphore may already
   15550 			 * be held unintentionally. Clear the semaphore once
   15551 			 * before giving up.
   15552 			 */
   15553 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15554 			wm_put_swsm_semaphore(sc);
   15555 			goto retry;
   15556 		}
   15557 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15558 		return -1;
   15559 	}
   15560 
   15561 	/* Get the FW semaphore. */
   15562 	timeout = sc->sc_nvm_wordsize + 1;
   15563 	while (timeout) {
   15564 		swsm = CSR_READ(sc, WMREG_SWSM);
   15565 		swsm |= SWSM_SWESMBI;
   15566 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15567 		/* If we managed to set the bit we got the semaphore. */
   15568 		swsm = CSR_READ(sc, WMREG_SWSM);
   15569 		if (swsm & SWSM_SWESMBI)
   15570 			break;
   15571 
   15572 		delay(50);
   15573 		timeout--;
   15574 	}
   15575 
   15576 	if (timeout == 0) {
   15577 		aprint_error_dev(sc->sc_dev,
   15578 		    "could not acquire SWSM SWESMBI\n");
   15579 		/* Release semaphores */
   15580 		wm_put_swsm_semaphore(sc);
   15581 		return -1;
   15582 	}
   15583 	return 0;
   15584 }
   15585 
   15586 /*
   15587  * Put hardware semaphore.
   15588  * Same as e1000_put_hw_semaphore_generic()
   15589  */
   15590 static void
   15591 wm_put_swsm_semaphore(struct wm_softc *sc)
   15592 {
   15593 	uint32_t swsm;
   15594 
   15595 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15596 		device_xname(sc->sc_dev), __func__));
   15597 
   15598 	swsm = CSR_READ(sc, WMREG_SWSM);
   15599 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15600 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15601 }
   15602 
   15603 /*
   15604  * Get SW/FW semaphore.
   15605  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
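 * 'mask' selects the resource: it is tested against both the SW and FW
 * halves of SW_FW_SYNC, and set in the SW half on success.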
   15606  */
   15607 static int
   15608 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15609 {
   15610 	uint32_t swfw_sync;
   15611 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15612 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15613 	int timeout;
   15614 
   15615 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15616 		device_xname(sc->sc_dev), __func__));
   15617 
   15618 	if (sc->sc_type == WM_T_80003)
   15619 		timeout = 50;
   15620 	else
   15621 		timeout = 200;
   15622 
   15623 	while (timeout) {
   15624 		if (wm_get_swsm_semaphore(sc)) {
   15625 			aprint_error_dev(sc->sc_dev,
   15626 			    "%s: failed to get semaphore\n",
   15627 			    __func__);
   15628 			return -1;
   15629 		}
   15630 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15631 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15632 			swfw_sync |= swmask;
   15633 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15634 			wm_put_swsm_semaphore(sc);
   15635 			return 0;
   15636 		}
   15637 		wm_put_swsm_semaphore(sc);
   15638 		delay(5000);
   15639 		timeout--;
   15640 	}
   15641 	device_printf(sc->sc_dev,
   15642 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15643 	    mask, swfw_sync);
   15644 	return -1;
   15645 }
   15646 
   15647 static void
   15648 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15649 {
   15650 	uint32_t swfw_sync;
   15651 
   15652 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15653 		device_xname(sc->sc_dev), __func__));
   15654 
   15655 	while (wm_get_swsm_semaphore(sc) != 0)
   15656 		continue;
   15657 
   15658 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15659 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15660 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15661 
   15662 	wm_put_swsm_semaphore(sc);
   15663 }
   15664 
   15665 static int
   15666 wm_get_nvm_80003(struct wm_softc *sc)
   15667 {
   15668 	int rv;
   15669 
   15670 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15671 		device_xname(sc->sc_dev), __func__));
   15672 
   15673 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15674 		aprint_error_dev(sc->sc_dev,
   15675 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15676 		return rv;
   15677 	}
   15678 
   15679 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15680 	    && (rv = wm_get_eecd(sc)) != 0) {
   15681 		aprint_error_dev(sc->sc_dev,
   15682 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15683 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15684 		return rv;
   15685 	}
   15686 
   15687 	return 0;
   15688 }
   15689 
   15690 static void
   15691 wm_put_nvm_80003(struct wm_softc *sc)
   15692 {
   15693 
   15694 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15695 		device_xname(sc->sc_dev), __func__));
   15696 
   15697 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15698 		wm_put_eecd(sc);
   15699 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15700 }
   15701 
   15702 static int
   15703 wm_get_nvm_82571(struct wm_softc *sc)
   15704 {
   15705 	int rv;
   15706 
   15707 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15708 		device_xname(sc->sc_dev), __func__));
   15709 
   15710 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15711 		return rv;
   15712 
   15713 	switch (sc->sc_type) {
   15714 	case WM_T_82573:
   15715 		break;
   15716 	default:
   15717 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15718 			rv = wm_get_eecd(sc);
   15719 		break;
   15720 	}
   15721 
   15722 	if (rv != 0) {
   15723 		aprint_error_dev(sc->sc_dev,
   15724 		    "%s: failed to get semaphore\n",
   15725 		    __func__);
   15726 		wm_put_swsm_semaphore(sc);
   15727 	}
   15728 
   15729 	return rv;
   15730 }
   15731 
   15732 static void
   15733 wm_put_nvm_82571(struct wm_softc *sc)
   15734 {
   15735 
   15736 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15737 		device_xname(sc->sc_dev), __func__));
   15738 
   15739 	switch (sc->sc_type) {
   15740 	case WM_T_82573:
   15741 		break;
   15742 	default:
   15743 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15744 			wm_put_eecd(sc);
   15745 		break;
   15746 	}
   15747 
   15748 	wm_put_swsm_semaphore(sc);
   15749 }
   15750 
   15751 static int
   15752 wm_get_phy_82575(struct wm_softc *sc)
   15753 {
   15754 
   15755 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15756 		device_xname(sc->sc_dev), __func__));
   15757 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15758 }
   15759 
   15760 static void
   15761 wm_put_phy_82575(struct wm_softc *sc)
   15762 {
   15763 
   15764 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15765 		device_xname(sc->sc_dev), __func__));
   15766 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15767 }
   15768 
   15769 static int
   15770 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15771 {
   15772 	uint32_t ext_ctrl;
	int timeout;
   15774 
   15775 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15776 		device_xname(sc->sc_dev), __func__));
   15777 
   15778 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15779 	for (timeout = 0; timeout < 200; timeout++) {
   15780 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15781 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15782 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15783 
   15784 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15785 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15786 			return 0;
   15787 		delay(5000);
   15788 	}
   15789 	device_printf(sc->sc_dev,
   15790 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15791 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15792 	return -1;
   15793 }
   15794 
   15795 static void
   15796 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15797 {
   15798 	uint32_t ext_ctrl;
   15799 
   15800 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15801 		device_xname(sc->sc_dev), __func__));
   15802 
   15803 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15804 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15805 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15806 
   15807 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15808 }
   15809 
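          /*
           * Acquisition happens in two phases: first wait (up to
           * WM_PHY_CFG_TIMEOUT iterations of 1ms each) for any current
           * owner to drop the MDIO ownership bit in EXTCNFCTR, then set
           * the bit and read it back to confirm that the hardware accepted
           * the claim.
           */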
   15810 static int
   15811 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15812 {
   15813 	uint32_t ext_ctrl;
   15814 	int timeout;
   15815 
   15816 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15817 		device_xname(sc->sc_dev), __func__));
   15818 	mutex_enter(sc->sc_ich_phymtx);
   15819 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15820 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15821 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15822 			break;
   15823 		delay(1000);
   15824 	}
   15825 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15826 		device_printf(sc->sc_dev,
   15827 		    "SW has already locked the resource\n");
   15828 		goto out;
   15829 	}
   15830 
   15831 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15832 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15833 	for (timeout = 0; timeout < 1000; timeout++) {
   15834 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15835 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15836 			break;
   15837 		delay(1000);
   15838 	}
   15839 	if (timeout >= 1000) {
   15840 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15841 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15842 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15843 		goto out;
   15844 	}
   15845 	return 0;
   15846 
   15847 out:
   15848 	mutex_exit(sc->sc_ich_phymtx);
   15849 	return -1;
   15850 }
   15851 
   15852 static void
   15853 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15854 {
   15855 	uint32_t ext_ctrl;
   15856 
   15857 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15858 		device_xname(sc->sc_dev), __func__));
   15859 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15860 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15861 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15862 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15863 	} else
   15864 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15865 
   15866 	mutex_exit(sc->sc_ich_phymtx);
   15867 }
   15868 
   15869 static int
   15870 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15871 {
   15872 
   15873 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15874 		device_xname(sc->sc_dev), __func__));
   15875 	mutex_enter(sc->sc_ich_nvmmtx);
   15876 
   15877 	return 0;
   15878 }
   15879 
   15880 static void
   15881 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15882 {
   15883 
   15884 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15885 		device_xname(sc->sc_dev), __func__));
   15886 	mutex_exit(sc->sc_ich_nvmmtx);
   15887 }
   15888 
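          /*
           * 82573 variant of the EXTCNFCTR MDIO ownership claim: keep
           * writing the ownership bit and re-reading it, for up to
           * WM_MDIO_OWNERSHIP_TIMEOUT attempts spaced 2ms apart, before
           * declaring the PHY inaccessible.
           */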
   15889 static int
   15890 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15891 {
   15892 	int i = 0;
   15893 	uint32_t reg;
   15894 
   15895 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15896 		device_xname(sc->sc_dev), __func__));
   15897 
   15898 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15899 	do {
   15900 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15901 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15902 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15903 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15904 			break;
   15905 		delay(2*1000);
   15906 		i++;
   15907 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15908 
   15909 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15910 		wm_put_hw_semaphore_82573(sc);
   15911 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15912 		    device_xname(sc->sc_dev));
   15913 		return -1;
   15914 	}
   15915 
   15916 	return 0;
   15917 }
   15918 
   15919 static void
   15920 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15921 {
   15922 	uint32_t reg;
   15923 
   15924 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15925 		device_xname(sc->sc_dev), __func__));
   15926 
   15927 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15928 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15929 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15930 }
   15931 
   15932 /*
   15933  * Management mode and power management related subroutines.
   15934  * BMC, AMT, suspend/resume and EEE.
   15935  */
   15936 
   15937 #ifdef WM_WOL
   15938 static int
   15939 wm_check_mng_mode(struct wm_softc *sc)
   15940 {
   15941 	int rv;
   15942 
   15943 	switch (sc->sc_type) {
   15944 	case WM_T_ICH8:
   15945 	case WM_T_ICH9:
   15946 	case WM_T_ICH10:
   15947 	case WM_T_PCH:
   15948 	case WM_T_PCH2:
   15949 	case WM_T_PCH_LPT:
   15950 	case WM_T_PCH_SPT:
   15951 	case WM_T_PCH_CNP:
   15952 	case WM_T_PCH_TGP:
   15953 		rv = wm_check_mng_mode_ich8lan(sc);
   15954 		break;
   15955 	case WM_T_82574:
   15956 	case WM_T_82583:
   15957 		rv = wm_check_mng_mode_82574(sc);
   15958 		break;
   15959 	case WM_T_82571:
   15960 	case WM_T_82572:
   15961 	case WM_T_82573:
   15962 	case WM_T_80003:
   15963 		rv = wm_check_mng_mode_generic(sc);
   15964 		break;
   15965 	default:
    15966 		/* Nothing to do */
   15967 		rv = 0;
   15968 		break;
   15969 	}
   15970 
   15971 	return rv;
   15972 }
   15973 
   15974 static int
   15975 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15976 {
   15977 	uint32_t fwsm;
   15978 
   15979 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15980 
   15981 	if (((fwsm & FWSM_FW_VALID) != 0)
   15982 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15983 		return 1;
   15984 
   15985 	return 0;
   15986 }
   15987 
   15988 static int
   15989 wm_check_mng_mode_82574(struct wm_softc *sc)
   15990 {
   15991 	uint16_t data;
   15992 
   15993 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15994 
   15995 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15996 		return 1;
   15997 
   15998 	return 0;
   15999 }
   16000 
   16001 static int
   16002 wm_check_mng_mode_generic(struct wm_softc *sc)
   16003 {
   16004 	uint32_t fwsm;
   16005 
   16006 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16007 
   16008 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   16009 		return 1;
   16010 
   16011 	return 0;
   16012 }
   16013 #endif /* WM_WOL */
   16014 
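          /*
           * Decide whether management packets have to be passed through to
           * the host.  Returns nonzero when pass-through is required: TCO
           * receive must be enabled and then either the ARC subsystem
           * reports iAMT mode (FWSM/FACTPS), the 82574/82583 NVM CFG2 word
           * selects pass-through, or SMBus is enabled without ASF.
           */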
   16015 static int
   16016 wm_enable_mng_pass_thru(struct wm_softc *sc)
   16017 {
   16018 	uint32_t manc, fwsm, factps;
   16019 
   16020 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   16021 		return 0;
   16022 
   16023 	manc = CSR_READ(sc, WMREG_MANC);
   16024 
   16025 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   16026 		device_xname(sc->sc_dev), manc));
   16027 	if ((manc & MANC_RECV_TCO_EN) == 0)
   16028 		return 0;
   16029 
   16030 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   16031 		fwsm = CSR_READ(sc, WMREG_FWSM);
   16032 		factps = CSR_READ(sc, WMREG_FACTPS);
   16033 		if (((factps & FACTPS_MNGCG) == 0)
   16034 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   16035 			return 1;
    16036 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   16037 		uint16_t data;
   16038 
   16039 		factps = CSR_READ(sc, WMREG_FACTPS);
   16040 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   16041 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   16042 			device_xname(sc->sc_dev), factps, data));
   16043 		if (((factps & FACTPS_MNGCG) == 0)
   16044 		    && ((data & NVM_CFG2_MNGM_MASK)
   16045 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   16046 			return 1;
   16047 	} else if (((manc & MANC_SMBUS_EN) != 0)
   16048 	    && ((manc & MANC_ASF_EN) == 0))
   16049 		return 1;
   16050 
   16051 	return 0;
   16052 }
   16053 
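          /*
           * Check whether firmware currently blocks PHY resets.  On
           * ICH/PCH parts this polls FWSM_RSPCIPHY, retrying up to 30
           * times with a 10ms delay; on 82571/2/3/4, 82583 and 80003 it
           * checks MANC_BLK_PHY_RST_ON_IDE.  Other chips never block the
           * reset.
           */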
   16054 static bool
   16055 wm_phy_resetisblocked(struct wm_softc *sc)
   16056 {
   16057 	bool blocked = false;
   16058 	uint32_t reg;
   16059 	int i = 0;
   16060 
   16061 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16062 		device_xname(sc->sc_dev), __func__));
   16063 
   16064 	switch (sc->sc_type) {
   16065 	case WM_T_ICH8:
   16066 	case WM_T_ICH9:
   16067 	case WM_T_ICH10:
   16068 	case WM_T_PCH:
   16069 	case WM_T_PCH2:
   16070 	case WM_T_PCH_LPT:
   16071 	case WM_T_PCH_SPT:
   16072 	case WM_T_PCH_CNP:
   16073 	case WM_T_PCH_TGP:
   16074 		do {
   16075 			reg = CSR_READ(sc, WMREG_FWSM);
   16076 			if ((reg & FWSM_RSPCIPHY) == 0) {
   16077 				blocked = true;
   16078 				delay(10*1000);
   16079 				continue;
   16080 			}
   16081 			blocked = false;
   16082 		} while (blocked && (i++ < 30));
   16083 		return blocked;
   16085 	case WM_T_82571:
   16086 	case WM_T_82572:
   16087 	case WM_T_82573:
   16088 	case WM_T_82574:
   16089 	case WM_T_82583:
   16090 	case WM_T_80003:
   16091 		reg = CSR_READ(sc, WMREG_MANC);
   16092 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   16093 			return true;
   16094 		else
   16095 			return false;
   16097 	default:
   16098 		/* No problem */
   16099 		break;
   16100 	}
   16101 
   16102 	return false;
   16103 }
   16104 
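          /*
           * Tell the firmware that the driver has taken over the device by
           * setting the DRV_LOAD bit (in SWSM on the 82573, in CTRL_EXT
           * from the 82571 onwards).  wm_release_hw_control() clears the
           * bit again when the driver relinquishes the device.
           */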
   16105 static void
   16106 wm_get_hw_control(struct wm_softc *sc)
   16107 {
   16108 	uint32_t reg;
   16109 
   16110 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16111 		device_xname(sc->sc_dev), __func__));
   16112 
   16113 	if (sc->sc_type == WM_T_82573) {
   16114 		reg = CSR_READ(sc, WMREG_SWSM);
   16115 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   16116 	} else if (sc->sc_type >= WM_T_82571) {
   16117 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16118 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   16119 	}
   16120 }
   16121 
   16122 static void
   16123 wm_release_hw_control(struct wm_softc *sc)
   16124 {
   16125 	uint32_t reg;
   16126 
   16127 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16128 		device_xname(sc->sc_dev), __func__));
   16129 
   16130 	if (sc->sc_type == WM_T_82573) {
   16131 		reg = CSR_READ(sc, WMREG_SWSM);
   16132 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   16133 	} else if (sc->sc_type >= WM_T_82571) {
   16134 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16135 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   16136 	}
   16137 }
   16138 
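          /*
           * On PCH2 and newer, EXTCNFCTR_GATE_PHY_CFG keeps the hardware
           * from starting its automatic PHY configuration.  The driver
           * gates it around its own PHY setup and ungates it afterwards;
           * see wm_init_phy_workarounds_pchlan() below.
           */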
   16139 static void
   16140 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   16141 {
   16142 	uint32_t reg;
   16143 
   16144 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16145 		device_xname(sc->sc_dev), __func__));
   16146 
   16147 	if (sc->sc_type < WM_T_PCH2)
   16148 		return;
   16149 
   16150 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   16151 
   16152 	if (gate)
   16153 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   16154 	else
   16155 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   16156 
   16157 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   16158 }
   16159 
   16160 static int
   16161 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   16162 {
   16163 	uint32_t fwsm, reg;
   16164 	int rv;
   16165 
   16166 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16167 		device_xname(sc->sc_dev), __func__));
   16168 
   16169 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   16170 	wm_gate_hw_phy_config_ich8lan(sc, true);
   16171 
   16172 	/* Disable ULP */
   16173 	wm_ulp_disable(sc);
   16174 
   16175 	/* Acquire PHY semaphore */
   16176 	rv = sc->phy.acquire(sc);
   16177 	if (rv != 0) {
   16178 		DPRINTF(sc, WM_DEBUG_INIT,
   16179 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16180 		return rv;
   16181 	}
   16182 
   16183 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   16184 	 * inaccessible and resetting the PHY is not blocked, toggle the
   16185 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   16186 	 */
   16187 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16188 	switch (sc->sc_type) {
   16189 	case WM_T_PCH_LPT:
   16190 	case WM_T_PCH_SPT:
   16191 	case WM_T_PCH_CNP:
   16192 	case WM_T_PCH_TGP:
   16193 		if (wm_phy_is_accessible_pchlan(sc))
   16194 			break;
   16195 
   16196 		/* Before toggling LANPHYPC, see if PHY is accessible by
   16197 		 * forcing MAC to SMBus mode first.
   16198 		 */
   16199 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16200 		reg |= CTRL_EXT_FORCE_SMBUS;
   16201 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16202 #if 0
   16203 		/* XXX Isn't this required??? */
   16204 		CSR_WRITE_FLUSH(sc);
   16205 #endif
   16206 		/* Wait 50 milliseconds for MAC to finish any retries
   16207 		 * that it might be trying to perform from previous
   16208 		 * attempts to acknowledge any phy read requests.
   16209 		 */
   16210 		delay(50 * 1000);
   16211 		/* FALLTHROUGH */
   16212 	case WM_T_PCH2:
   16213 		if (wm_phy_is_accessible_pchlan(sc) == true)
   16214 			break;
   16215 		/* FALLTHROUGH */
   16216 	case WM_T_PCH:
   16217 		if (sc->sc_type == WM_T_PCH)
   16218 			if ((fwsm & FWSM_FW_VALID) != 0)
   16219 				break;
   16220 
   16221 		if (wm_phy_resetisblocked(sc) == true) {
   16222 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16223 			break;
   16224 		}
   16225 
   16226 		/* Toggle LANPHYPC Value bit */
   16227 		wm_toggle_lanphypc_pch_lpt(sc);
   16228 
   16229 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16230 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16231 				break;
   16232 
   16233 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   16234 			 * so ensure that the MAC is also out of SMBus mode
   16235 			 */
   16236 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16237 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16238 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16239 
   16240 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16241 				break;
   16242 			rv = -1;
   16243 		}
   16244 		break;
   16245 	default:
   16246 		break;
   16247 	}
   16248 
   16249 	/* Release semaphore */
   16250 	sc->phy.release(sc);
   16251 
   16252 	if (rv == 0) {
   16253 		/* Check to see if able to reset PHY.  Print error if not */
   16254 		if (wm_phy_resetisblocked(sc)) {
   16255 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16256 			goto out;
   16257 		}
   16258 
   16259 		/* Reset the PHY before any access to it.  Doing so, ensures
   16260 		 * that the PHY is in a known good state before we read/write
   16261 		 * PHY registers.  The generic reset is sufficient here,
   16262 		 * because we haven't determined the PHY type yet.
   16263 		 */
   16264 		if (wm_reset_phy(sc) != 0)
   16265 			goto out;
   16266 
   16267 		/* On a successful reset, possibly need to wait for the PHY
   16268 		 * to quiesce to an accessible state before returning control
    16269 		 * to the calling function.  If the PHY does not quiesce,
    16270 		 * report that the reset is still blocked, as this is the
    16271 		 * condition the PHY is in.
   16272 		 */
   16273 		if (wm_phy_resetisblocked(sc))
   16274 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16275 	}
   16276 
   16277 out:
   16278 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16279 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16280 		delay(10*1000);
   16281 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16282 	}
   16283 
    16284 	return rv;
   16285 }
   16286 
   16287 static void
   16288 wm_init_manageability(struct wm_softc *sc)
   16289 {
   16290 
   16291 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16292 		device_xname(sc->sc_dev), __func__));
   16293 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16294 
   16295 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16296 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16297 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16298 
   16299 		/* Disable hardware interception of ARP */
   16300 		manc &= ~MANC_ARP_EN;
   16301 
   16302 		/* Enable receiving management packets to the host */
   16303 		if (sc->sc_type >= WM_T_82571) {
   16304 			manc |= MANC_EN_MNG2HOST;
   16305 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16306 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16307 		}
   16308 
   16309 		CSR_WRITE(sc, WMREG_MANC, manc);
   16310 	}
   16311 }
   16312 
   16313 static void
   16314 wm_release_manageability(struct wm_softc *sc)
   16315 {
   16316 
   16317 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16318 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16319 
   16320 		manc |= MANC_ARP_EN;
   16321 		if (sc->sc_type >= WM_T_82571)
   16322 			manc &= ~MANC_EN_MNG2HOST;
   16323 
   16324 		CSR_WRITE(sc, WMREG_MANC, manc);
   16325 	}
   16326 }
   16327 
   16328 static void
   16329 wm_get_wakeup(struct wm_softc *sc)
   16330 {
   16331 
   16332 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16333 	switch (sc->sc_type) {
   16334 	case WM_T_82573:
   16335 	case WM_T_82583:
   16336 		sc->sc_flags |= WM_F_HAS_AMT;
   16337 		/* FALLTHROUGH */
   16338 	case WM_T_80003:
   16339 	case WM_T_82575:
   16340 	case WM_T_82576:
   16341 	case WM_T_82580:
   16342 	case WM_T_I350:
   16343 	case WM_T_I354:
   16344 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16345 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16346 		/* FALLTHROUGH */
   16347 	case WM_T_82541:
   16348 	case WM_T_82541_2:
   16349 	case WM_T_82547:
   16350 	case WM_T_82547_2:
   16351 	case WM_T_82571:
   16352 	case WM_T_82572:
   16353 	case WM_T_82574:
   16354 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16355 		break;
   16356 	case WM_T_ICH8:
   16357 	case WM_T_ICH9:
   16358 	case WM_T_ICH10:
   16359 	case WM_T_PCH:
   16360 	case WM_T_PCH2:
   16361 	case WM_T_PCH_LPT:
   16362 	case WM_T_PCH_SPT:
   16363 	case WM_T_PCH_CNP:
   16364 	case WM_T_PCH_TGP:
   16365 		sc->sc_flags |= WM_F_HAS_AMT;
   16366 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16367 		break;
   16368 	default:
   16369 		break;
   16370 	}
   16371 
   16372 	/* 1: HAS_MANAGE */
   16373 	if (wm_enable_mng_pass_thru(sc) != 0)
   16374 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16375 
   16376 	/*
    16377 	 * Note that the WOL flag is set after the resetting of the EEPROM
    16378 	 * stuff.
   16379 	 */
   16380 }
   16381 
   16382 /*
   16383  * Unconfigure Ultra Low Power mode.
   16384  * Only for I217 and newer (see below).
   16385  */
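          /*
           * Two exit paths are implemented below.  When valid firmware
           * (ME) is present, the driver merely asks it to leave ULP
           * through the H2ME register and polls FWSM_ULP_CFG_DONE
           * (30 x 10ms, i.e. the 300msec mentioned in the comment).
           * Otherwise the driver does the work itself: toggle LANPHYPC,
           * unforce SMBus mode in both PHY and MAC, and clear the ULP
           * configuration in I218_ULP_CONFIG1.
           */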
   16386 static int
   16387 wm_ulp_disable(struct wm_softc *sc)
   16388 {
   16389 	uint32_t reg;
   16390 	uint16_t phyreg;
   16391 	int i = 0, rv;
   16392 
   16393 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16394 		device_xname(sc->sc_dev), __func__));
   16395 	/* Exclude old devices */
   16396 	if ((sc->sc_type < WM_T_PCH_LPT)
   16397 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16398 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16399 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16400 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16401 		return 0;
   16402 
   16403 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16404 		/* Request ME un-configure ULP mode in the PHY */
   16405 		reg = CSR_READ(sc, WMREG_H2ME);
   16406 		reg &= ~H2ME_ULP;
   16407 		reg |= H2ME_ENFORCE_SETTINGS;
   16408 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16409 
   16410 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16411 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16412 			if (i++ == 30) {
   16413 				device_printf(sc->sc_dev, "%s timed out\n",
   16414 				    __func__);
   16415 				return -1;
   16416 			}
   16417 			delay(10 * 1000);
   16418 		}
   16419 		reg = CSR_READ(sc, WMREG_H2ME);
   16420 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16421 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16422 
   16423 		return 0;
   16424 	}
   16425 
   16426 	/* Acquire semaphore */
   16427 	rv = sc->phy.acquire(sc);
   16428 	if (rv != 0) {
   16429 		DPRINTF(sc, WM_DEBUG_INIT,
   16430 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16431 		return rv;
   16432 	}
   16433 
   16434 	/* Toggle LANPHYPC */
   16435 	wm_toggle_lanphypc_pch_lpt(sc);
   16436 
   16437 	/* Unforce SMBus mode in PHY */
   16438 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16439 	if (rv != 0) {
   16440 		uint32_t reg2;
   16441 
   16442 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16443 		    __func__);
   16444 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16445 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16446 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16447 		delay(50 * 1000);
   16448 
   16449 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16450 		    &phyreg);
   16451 		if (rv != 0)
   16452 			goto release;
   16453 	}
   16454 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16455 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16456 
   16457 	/* Unforce SMBus mode in MAC */
   16458 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16459 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16460 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16461 
   16462 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16463 	if (rv != 0)
   16464 		goto release;
   16465 	phyreg |= HV_PM_CTRL_K1_ENA;
   16466 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16467 
   16468 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16469 	    &phyreg);
   16470 	if (rv != 0)
   16471 		goto release;
   16472 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16473 	    | I218_ULP_CONFIG1_STICKY_ULP
   16474 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16475 	    | I218_ULP_CONFIG1_WOL_HOST
   16476 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16477 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16478 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16479 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16480 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16481 	phyreg |= I218_ULP_CONFIG1_START;
   16482 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16483 
   16484 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16485 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16486 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16487 
   16488 release:
   16489 	/* Release semaphore */
   16490 	sc->phy.release(sc);
   16491 	wm_gmii_reset(sc);
   16492 	delay(50 * 1000);
   16493 
   16494 	return rv;
   16495 }
   16496 
   16497 /* WOL in the newer chipset interfaces (pchlan) */
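          /*
           * On PCH and newer it is the PHY, not the MAC, that wakes the
           * system, so the MAC's receive address registers, multicast
           * table and RCTL settings have to be mirrored into the
           * corresponding BM_WUC_PAGE wakeup registers of the PHY before
           * WUC/WUFC are armed.  That mirroring is what the body below
           * does step by step.
           */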
   16498 static int
   16499 wm_enable_phy_wakeup(struct wm_softc *sc)
   16500 {
   16501 	device_t dev = sc->sc_dev;
   16502 	uint32_t mreg, moff;
   16503 	uint16_t wuce, wuc, wufc, preg;
   16504 	int i, rv;
   16505 
   16506 	KASSERT(sc->sc_type >= WM_T_PCH);
   16507 
   16508 	/* Copy MAC RARs to PHY RARs */
   16509 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16510 
   16511 	/* Activate PHY wakeup */
   16512 	rv = sc->phy.acquire(sc);
   16513 	if (rv != 0) {
   16514 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16515 		    __func__);
   16516 		return rv;
   16517 	}
   16518 
   16519 	/*
   16520 	 * Enable access to PHY wakeup registers.
   16521 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16522 	 */
   16523 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16524 	if (rv != 0) {
   16525 		device_printf(dev,
   16526 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16527 		goto release;
   16528 	}
   16529 
   16530 	/* Copy MAC MTA to PHY MTA */
   16531 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16532 		uint16_t lo, hi;
   16533 
   16534 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16535 		lo = (uint16_t)(mreg & 0xffff);
   16536 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16537 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16538 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16539 	}
   16540 
   16541 	/* Configure PHY Rx Control register */
   16542 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16543 	mreg = CSR_READ(sc, WMREG_RCTL);
   16544 	if (mreg & RCTL_UPE)
   16545 		preg |= BM_RCTL_UPE;
   16546 	if (mreg & RCTL_MPE)
   16547 		preg |= BM_RCTL_MPE;
   16548 	preg &= ~(BM_RCTL_MO_MASK);
   16549 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16550 	if (moff != 0)
   16551 		preg |= moff << BM_RCTL_MO_SHIFT;
   16552 	if (mreg & RCTL_BAM)
   16553 		preg |= BM_RCTL_BAM;
   16554 	if (mreg & RCTL_PMCF)
   16555 		preg |= BM_RCTL_PMCF;
   16556 	mreg = CSR_READ(sc, WMREG_CTRL);
   16557 	if (mreg & CTRL_RFCE)
   16558 		preg |= BM_RCTL_RFCE;
   16559 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16560 
   16561 	wuc = WUC_APME | WUC_PME_EN;
   16562 	wufc = WUFC_MAG;
   16563 	/* Enable PHY wakeup in MAC register */
   16564 	CSR_WRITE(sc, WMREG_WUC,
   16565 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16566 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16567 
   16568 	/* Configure and enable PHY wakeup in PHY registers */
   16569 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16570 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16571 
   16572 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16573 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16574 
   16575 release:
   16576 	sc->phy.release(sc);
   16577 
    16578 	return rv;
   16579 }
   16580 
   16581 /* Power down workaround on D3 */
   16582 static void
   16583 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16584 {
   16585 	uint32_t reg;
   16586 	uint16_t phyreg;
   16587 	int i;
   16588 
   16589 	for (i = 0; i < 2; i++) {
   16590 		/* Disable link */
   16591 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16592 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16593 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16594 
   16595 		/*
   16596 		 * Call gig speed drop workaround on Gig disable before
   16597 		 * accessing any PHY registers
   16598 		 */
   16599 		if (sc->sc_type == WM_T_ICH8)
   16600 			wm_gig_downshift_workaround_ich8lan(sc);
   16601 
   16602 		/* Write VR power-down enable */
   16603 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16604 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16605 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16606 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16607 
   16608 		/* Read it back and test */
   16609 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16610 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16611 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16612 			break;
   16613 
   16614 		/* Issue PHY reset and repeat at most one more time */
   16615 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16616 	}
   16617 }
   16618 
   16619 /*
   16620  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16621  *  @sc: pointer to the HW structure
   16622  *
   16623  *  During S0 to Sx transition, it is possible the link remains at gig
   16624  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16625  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16626  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16627  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16628  *  needs to be written.
    16629  *  Parts that support (and are linked to a partner which supports) EEE in
   16630  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16631  *  than 10Mbps w/o EEE.
   16632  */
   16633 static void
   16634 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16635 {
   16636 	device_t dev = sc->sc_dev;
   16637 	struct ethercom *ec = &sc->sc_ethercom;
   16638 	uint32_t phy_ctrl;
   16639 	int rv;
   16640 
   16641 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16642 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16643 
   16644 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_TGP));
   16645 
   16646 	if (sc->sc_phytype == WMPHY_I217) {
   16647 		uint16_t devid = sc->sc_pcidevid;
   16648 
   16649 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16650 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16651 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16652 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16653 		    (sc->sc_type >= WM_T_PCH_SPT))
   16654 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16655 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16656 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16657 
   16658 		if (sc->phy.acquire(sc) != 0)
   16659 			goto out;
   16660 
   16661 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16662 			uint16_t eee_advert;
   16663 
   16664 			rv = wm_read_emi_reg_locked(dev,
   16665 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16666 			if (rv)
   16667 				goto release;
   16668 
   16669 			/*
   16670 			 * Disable LPLU if both link partners support 100BaseT
   16671 			 * EEE and 100Full is advertised on both ends of the
   16672 			 * link, and enable Auto Enable LPI since there will
   16673 			 * be no driver to enable LPI while in Sx.
   16674 			 */
   16675 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16676 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16677 				uint16_t anar, phy_reg;
   16678 
   16679 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16680 				    &anar);
   16681 				if (anar & ANAR_TX_FD) {
   16682 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16683 					    PHY_CTRL_NOND0A_LPLU);
   16684 
   16685 					/* Set Auto Enable LPI after link up */
   16686 					sc->phy.readreg_locked(dev, 2,
   16687 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16688 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16689 					sc->phy.writereg_locked(dev, 2,
   16690 					    I217_LPI_GPIO_CTRL, phy_reg);
   16691 				}
   16692 			}
   16693 		}
   16694 
   16695 		/*
   16696 		 * For i217 Intel Rapid Start Technology support,
   16697 		 * when the system is going into Sx and no manageability engine
   16698 		 * is present, the driver must configure proxy to reset only on
   16699 		 * power good.	LPI (Low Power Idle) state must also reset only
   16700 		 * on power good, as well as the MTA (Multicast table array).
   16701 		 * The SMBus release must also be disabled on LCD reset.
   16702 		 */
   16703 
   16704 		/*
   16705 		 * Enable MTA to reset for Intel Rapid Start Technology
   16706 		 * Support
   16707 		 */
   16708 
   16709 release:
   16710 		sc->phy.release(sc);
   16711 	}
   16712 out:
   16713 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16714 
   16715 	if (sc->sc_type == WM_T_ICH8)
   16716 		wm_gig_downshift_workaround_ich8lan(sc);
   16717 
   16718 	if (sc->sc_type >= WM_T_PCH) {
   16719 		wm_oem_bits_config_ich8lan(sc, false);
   16720 
   16721 		/* Reset PHY to activate OEM bits on 82577/8 */
   16722 		if (sc->sc_type == WM_T_PCH)
   16723 			wm_reset_phy(sc);
   16724 
   16725 		if (sc->phy.acquire(sc) != 0)
   16726 			return;
   16727 		wm_write_smbus_addr(sc);
   16728 		sc->phy.release(sc);
   16729 	}
   16730 }
   16731 
   16732 /*
   16733  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16734  *  @sc: pointer to the HW structure
   16735  *
   16736  *  During Sx to S0 transitions on non-managed devices or managed devices
   16737  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16738  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
    16739  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16740  *  On i217, setup Intel Rapid Start Technology.
   16741  */
   16742 static int
   16743 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16744 {
   16745 	device_t dev = sc->sc_dev;
   16746 	int rv;
   16747 
   16748 	if (sc->sc_type < WM_T_PCH2)
   16749 		return 0;
   16750 
   16751 	rv = wm_init_phy_workarounds_pchlan(sc);
   16752 	if (rv != 0)
   16753 		return rv;
   16754 
    16755 	/* For i217 Intel Rapid Start Technology support, when the system
    16756 	 * is transitioning from Sx and no manageability engine is present,
   16757 	 * configure SMBus to restore on reset, disable proxy, and enable
   16758 	 * the reset on MTA (Multicast table array).
   16759 	 */
   16760 	if (sc->sc_phytype == WMPHY_I217) {
   16761 		uint16_t phy_reg;
   16762 
   16763 		rv = sc->phy.acquire(sc);
   16764 		if (rv != 0)
   16765 			return rv;
   16766 
   16767 		/* Clear Auto Enable LPI after link up */
   16768 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16769 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16770 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16771 
   16772 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16773 			/* Restore clear on SMB if no manageability engine
   16774 			 * is present
   16775 			 */
   16776 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16777 			    &phy_reg);
   16778 			if (rv != 0)
   16779 				goto release;
   16780 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16781 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16782 
   16783 			/* Disable Proxy */
   16784 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16785 		}
   16786 		/* Enable reset on MTA */
    16787 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16788 		if (rv != 0)
   16789 			goto release;
   16790 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16791 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16792 
   16793 release:
   16794 		sc->phy.release(sc);
   16795 		return rv;
   16796 	}
   16797 
   16798 	return 0;
   16799 }
   16800 
   16801 static void
   16802 wm_enable_wakeup(struct wm_softc *sc)
   16803 {
   16804 	uint32_t reg, pmreg;
   16805 	pcireg_t pmode;
   16806 	int rv = 0;
   16807 
   16808 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16809 		device_xname(sc->sc_dev), __func__));
   16810 
   16811 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16812 	    &pmreg, NULL) == 0)
   16813 		return;
   16814 
   16815 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16816 		goto pme;
   16817 
   16818 	/* Advertise the wakeup capability */
   16819 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16820 	    | CTRL_SWDPIN(3));
   16821 
   16822 	/* Keep the laser running on fiber adapters */
   16823 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16824 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16825 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16826 		reg |= CTRL_EXT_SWDPIN(3);
   16827 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16828 	}
   16829 
   16830 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16831 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16832 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16833 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) ||
   16834 	    (sc->sc_type == WM_T_PCH_TGP))
   16835 		wm_suspend_workarounds_ich8lan(sc);
   16836 
   16837 #if 0	/* For the multicast packet */
   16838 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16839 	reg |= WUFC_MC;
   16840 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16841 #endif
   16842 
   16843 	if (sc->sc_type >= WM_T_PCH) {
   16844 		rv = wm_enable_phy_wakeup(sc);
   16845 		if (rv != 0)
   16846 			goto pme;
   16847 	} else {
   16848 		/* Enable wakeup by the MAC */
   16849 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16850 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16851 	}
   16852 
   16853 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16854 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16855 		|| (sc->sc_type == WM_T_PCH2))
   16856 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16857 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16858 
   16859 pme:
   16860 	/* Request PME */
   16861 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16862 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16863 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16864 		/* For WOL */
   16865 		pmode |= PCI_PMCSR_PME_EN;
   16866 	} else {
   16867 		/* Disable WOL */
   16868 		pmode &= ~PCI_PMCSR_PME_EN;
   16869 	}
   16870 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16871 }
   16872 
   16873 /* Disable ASPM L0s and/or L1 for workaround */
   16874 static void
   16875 wm_disable_aspm(struct wm_softc *sc)
   16876 {
   16877 	pcireg_t reg, mask = 0;
    16878 	const char *str = "";
   16879 
   16880 	/*
    16881 	 * Only for PCIe devices which have the PCIe capability in the PCI
    16882 	 * config space.
   16883 	 */
   16884 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16885 		return;
   16886 
   16887 	switch (sc->sc_type) {
   16888 	case WM_T_82571:
   16889 	case WM_T_82572:
   16890 		/*
   16891 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16892 		 * State Power management L1 State (ASPM L1).
   16893 		 */
   16894 		mask = PCIE_LCSR_ASPM_L1;
   16895 		str = "L1 is";
   16896 		break;
   16897 	case WM_T_82573:
   16898 	case WM_T_82574:
   16899 	case WM_T_82583:
   16900 		/*
   16901 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16902 		 *
    16903 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16904 		 * some chipsets.  The 82574 and 82583 documents say that
    16905 		 * disabling L0s on those specific chipsets is sufficient,
    16906 		 * but we follow what the Intel em driver does.
   16907 		 *
   16908 		 * References:
   16909 		 * Errata 8 of the Specification Update of i82573.
   16910 		 * Errata 20 of the Specification Update of i82574.
   16911 		 * Errata 9 of the Specification Update of i82583.
   16912 		 */
   16913 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16914 		str = "L0s and L1 are";
   16915 		break;
   16916 	default:
   16917 		return;
   16918 	}
   16919 
   16920 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16921 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16922 	reg &= ~mask;
   16923 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16924 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16925 
   16926 	/* Print only in wm_attach() */
   16927 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16928 		aprint_verbose_dev(sc->sc_dev,
   16929 		    "ASPM %s disabled to workaround the errata.\n", str);
   16930 }
   16931 
   16932 /* LPLU */
   16933 
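          /*
           * Disable D0 Low Power Link Up.  LPLU lets the PHY bring the
           * link up at a reduced speed to save power; where the control
           * lives is chip-specific (an IGP PHY power management register,
           * the PHPM register, PHY_CTRL, or the HV OEM bits), hence the
           * switch below.
           */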
   16934 static void
   16935 wm_lplu_d0_disable(struct wm_softc *sc)
   16936 {
   16937 	struct mii_data *mii = &sc->sc_mii;
   16938 	uint32_t reg;
   16939 	uint16_t phyval;
   16940 
   16941 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16942 		device_xname(sc->sc_dev), __func__));
   16943 
   16944 	if (sc->sc_phytype == WMPHY_IFE)
   16945 		return;
   16946 
   16947 	switch (sc->sc_type) {
   16948 	case WM_T_82571:
   16949 	case WM_T_82572:
   16950 	case WM_T_82573:
   16951 	case WM_T_82575:
   16952 	case WM_T_82576:
   16953 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16954 		phyval &= ~PMR_D0_LPLU;
   16955 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16956 		break;
   16957 	case WM_T_82580:
   16958 	case WM_T_I350:
   16959 	case WM_T_I210:
   16960 	case WM_T_I211:
   16961 		reg = CSR_READ(sc, WMREG_PHPM);
   16962 		reg &= ~PHPM_D0A_LPLU;
   16963 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16964 		break;
   16965 	case WM_T_82574:
   16966 	case WM_T_82583:
   16967 	case WM_T_ICH8:
   16968 	case WM_T_ICH9:
   16969 	case WM_T_ICH10:
   16970 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16971 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16972 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16973 		CSR_WRITE_FLUSH(sc);
   16974 		break;
   16975 	case WM_T_PCH:
   16976 	case WM_T_PCH2:
   16977 	case WM_T_PCH_LPT:
   16978 	case WM_T_PCH_SPT:
   16979 	case WM_T_PCH_CNP:
   16980 	case WM_T_PCH_TGP:
   16981 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16982 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16983 		if (wm_phy_resetisblocked(sc) == false)
   16984 			phyval |= HV_OEM_BITS_ANEGNOW;
   16985 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16986 		break;
   16987 	default:
   16988 		break;
   16989 	}
   16990 }
   16991 
   16992 /* EEE */
   16993 
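          /*
           * Energy Efficient Ethernet.  On the I350 class of MACs the
           * feature is driven by just two registers: IPCNFG selects the
           * speeds for which EEE is negotiated and EEER enables LPI on the
           * Tx/Rx paths, so enabling or disabling comes down to setting or
           * clearing the two mask values below.
           */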
   16994 static int
   16995 wm_set_eee_i350(struct wm_softc *sc)
   16996 {
   16997 	struct ethercom *ec = &sc->sc_ethercom;
   16998 	uint32_t ipcnfg, eeer;
   16999 	uint32_t ipcnfg_mask
   17000 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   17001 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   17002 
   17003 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   17004 
   17005 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   17006 	eeer = CSR_READ(sc, WMREG_EEER);
   17007 
   17008 	/* Enable or disable per user setting */
   17009 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   17010 		ipcnfg |= ipcnfg_mask;
   17011 		eeer |= eeer_mask;
   17012 	} else {
   17013 		ipcnfg &= ~ipcnfg_mask;
   17014 		eeer &= ~eeer_mask;
   17015 	}
   17016 
   17017 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   17018 	CSR_WRITE(sc, WMREG_EEER, eeer);
   17019 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   17020 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   17021 
   17022 	return 0;
   17023 }
   17024 
   17025 static int
   17026 wm_set_eee_pchlan(struct wm_softc *sc)
   17027 {
   17028 	device_t dev = sc->sc_dev;
   17029 	struct ethercom *ec = &sc->sc_ethercom;
   17030 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   17031 	int rv;
   17032 
   17033 	switch (sc->sc_phytype) {
   17034 	case WMPHY_82579:
   17035 		lpa = I82579_EEE_LP_ABILITY;
   17036 		pcs_status = I82579_EEE_PCS_STATUS;
   17037 		adv_addr = I82579_EEE_ADVERTISEMENT;
   17038 		break;
   17039 	case WMPHY_I217:
   17040 		lpa = I217_EEE_LP_ABILITY;
   17041 		pcs_status = I217_EEE_PCS_STATUS;
   17042 		adv_addr = I217_EEE_ADVERTISEMENT;
   17043 		break;
   17044 	default:
   17045 		return 0;
   17046 	}
   17047 
   17048 	rv = sc->phy.acquire(sc);
   17049 	if (rv != 0) {
   17050 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   17051 		return rv;
   17052 	}
   17053 
   17054 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   17055 	if (rv != 0)
   17056 		goto release;
   17057 
   17058 	/* Clear bits that enable EEE in various speeds */
   17059 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   17060 
   17061 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   17062 		/* Save off link partner's EEE ability */
   17063 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   17064 		if (rv != 0)
   17065 			goto release;
   17066 
   17067 		/* Read EEE advertisement */
   17068 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   17069 			goto release;
   17070 
   17071 		/*
   17072 		 * Enable EEE only for speeds in which the link partner is
   17073 		 * EEE capable and for which we advertise EEE.
   17074 		 */
   17075 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   17076 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   17077 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   17078 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   17079 			if ((data & ANLPAR_TX_FD) != 0)
   17080 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   17081 			else {
   17082 				/*
   17083 				 * EEE is not supported in 100Half, so ignore
   17084 				 * partner's EEE in 100 ability if full-duplex
   17085 				 * is not advertised.
   17086 				 */
   17087 				sc->eee_lp_ability
   17088 				    &= ~AN_EEEADVERT_100_TX;
   17089 			}
   17090 		}
   17091 	}
   17092 
   17093 	if (sc->sc_phytype == WMPHY_82579) {
   17094 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   17095 		if (rv != 0)
   17096 			goto release;
   17097 
   17098 		data &= ~I82579_LPI_PLL_SHUT_100;
   17099 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   17100 	}
   17101 
   17102 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   17103 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   17104 		goto release;
   17105 
   17106 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   17107 release:
   17108 	sc->phy.release(sc);
   17109 
   17110 	return rv;
   17111 }
   17112 
   17113 static int
   17114 wm_set_eee(struct wm_softc *sc)
   17115 {
   17116 	struct ethercom *ec = &sc->sc_ethercom;
   17117 
   17118 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   17119 		return 0;
   17120 
   17121 	if (sc->sc_type == WM_T_I354) {
   17122 		/* I354 uses an external PHY */
   17123 		return 0; /* not yet */
   17124 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   17125 		return wm_set_eee_i350(sc);
   17126 	else if (sc->sc_type >= WM_T_PCH2)
   17127 		return wm_set_eee_pchlan(sc);
   17128 
   17129 	return 0;
   17130 }
   17131 
   17132 /*
   17133  * Workarounds (mainly PHY related).
   17134  * Basically, PHY's workarounds are in the PHY drivers.
   17135  */
   17136 
   17137 /* Workaround for 82566 Kumeran PCS lock loss */
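          /*
           * If the PCS lock-loss diagnostic bit stays set, the PHY is
           * reset and rechecked up to ten times; as a last resort gigabit
           * negotiation is disabled entirely (with the gig-speed-drop
           * workaround applied first).  The double read of IGP3_KMRN_DIAG
           * is intentional: the first read is assumed to refresh the
           * latched diagnostic value.
           */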
   17138 static int
   17139 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   17140 {
   17141 	struct mii_data *mii = &sc->sc_mii;
   17142 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17143 	int i, reg, rv;
   17144 	uint16_t phyreg;
   17145 
   17146 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17147 		device_xname(sc->sc_dev), __func__));
   17148 
   17149 	/* If the link is not up, do nothing */
   17150 	if ((status & STATUS_LU) == 0)
   17151 		return 0;
   17152 
   17153 	/* Nothing to do if the link is other than 1Gbps */
   17154 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   17155 		return 0;
   17156 
   17157 	for (i = 0; i < 10; i++) {
   17158 		/* read twice */
   17159 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17160 		if (rv != 0)
   17161 			return rv;
   17162 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17163 		if (rv != 0)
   17164 			return rv;
   17165 
   17166 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   17167 			goto out;	/* GOOD! */
   17168 
   17169 		/* Reset the PHY */
   17170 		wm_reset_phy(sc);
   17171 		delay(5*1000);
   17172 	}
   17173 
   17174 	/* Disable GigE link negotiation */
   17175 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   17176 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   17177 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   17178 
   17179 	/*
   17180 	 * Call gig speed drop workaround on Gig disable before accessing
   17181 	 * any PHY registers.
   17182 	 */
   17183 	wm_gig_downshift_workaround_ich8lan(sc);
   17184 
   17185 out:
   17186 	return 0;
   17187 }
   17188 
   17189 /*
   17190  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   17191  *  @sc: pointer to the HW structure
   17192  *
    17193  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   17194  *  LPLU, Gig disable, MDIC PHY reset):
   17195  *    1) Set Kumeran Near-end loopback
   17196  *    2) Clear Kumeran Near-end loopback
   17197  *  Should only be called for ICH8[m] devices with any 1G Phy.
   17198  */
   17199 static void
   17200 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   17201 {
   17202 	uint16_t kmreg;
   17203 
   17204 	/* Only for igp3 */
   17205 	if (sc->sc_phytype == WMPHY_IGP_3) {
   17206 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   17207 			return;
   17208 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   17209 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   17210 			return;
   17211 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   17212 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   17213 	}
   17214 }
   17215 
   17216 /*
   17217  * Workaround for pch's PHYs
   17218  * XXX should be moved to new PHY driver?
   17219  */
   17220 static int
   17221 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17222 {
   17223 	device_t dev = sc->sc_dev;
   17224 	struct mii_data *mii = &sc->sc_mii;
   17225 	struct mii_softc *child;
   17226 	uint16_t phy_data, phyrev = 0;
   17227 	int phytype = sc->sc_phytype;
   17228 	int rv;
   17229 
   17230 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17231 		device_xname(dev), __func__));
   17232 	KASSERT(sc->sc_type == WM_T_PCH);
   17233 
   17234 	/* Set MDIO slow mode before any other MDIO access */
   17235 	if (phytype == WMPHY_82577)
   17236 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17237 			return rv;
   17238 
   17239 	child = LIST_FIRST(&mii->mii_phys);
   17240 	if (child != NULL)
   17241 		phyrev = child->mii_mpd_rev;
   17242 
    17243 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   17244 	if ((child != NULL) &&
   17245 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17246 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17247 		/* Disable generation of early preamble (0x4431) */
   17248 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17249 		    &phy_data);
   17250 		if (rv != 0)
   17251 			return rv;
   17252 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17253 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17254 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17255 		    phy_data);
   17256 		if (rv != 0)
   17257 			return rv;
   17258 
   17259 		/* Preamble tuning for SSC */
   17260 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17261 		if (rv != 0)
   17262 			return rv;
   17263 	}
   17264 
   17265 	/* 82578 */
   17266 	if (phytype == WMPHY_82578) {
   17267 		/*
   17268 		 * Return registers to default by doing a soft reset then
   17269 		 * writing 0x3140 to the control register
   17270 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17271 		 */
   17272 		if ((child != NULL) && (phyrev < 2)) {
   17273 			PHY_RESET(child);
   17274 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17275 			if (rv != 0)
   17276 				return rv;
   17277 		}
   17278 	}
   17279 
   17280 	/* Select page 0 */
   17281 	if ((rv = sc->phy.acquire(sc)) != 0)
   17282 		return rv;
   17283 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17284 	sc->phy.release(sc);
   17285 	if (rv != 0)
   17286 		return rv;
   17287 
   17288 	/*
   17289 	 * Configure the K1 Si workaround during phy reset assuming there is
   17290 	 * link so that it disables K1 if link is in 1Gbps.
   17291 	 */
   17292 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17293 		return rv;
   17294 
   17295 	/* Workaround for link disconnects on a busy hub in half duplex */
   17296 	rv = sc->phy.acquire(sc);
   17297 	if (rv)
   17298 		return rv;
   17299 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17300 	if (rv)
   17301 		goto release;
   17302 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17303 	    phy_data & 0x00ff);
   17304 	if (rv)
   17305 		goto release;
   17306 
   17307 	/* Set MSE higher to enable link to stay up when noise is high */
   17308 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17309 release:
   17310 	sc->phy.release(sc);
   17311 
   17312 	return rv;
   17313 }
   17314 
   17315 /*
   17316  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17317  *  @sc:   pointer to the HW structure
   17318  */
   17319 static void
   17320 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17321 {
   17322 
   17323 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17324 		device_xname(sc->sc_dev), __func__));
   17325 
   17326 	if (sc->phy.acquire(sc) != 0)
   17327 		return;
   17328 
   17329 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17330 
   17331 	sc->phy.release(sc);
   17332 }
   17333 
   17334 static void
   17335 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17336 {
   17337 	device_t dev = sc->sc_dev;
   17338 	uint32_t mac_reg;
   17339 	uint16_t i, wuce;
   17340 	int count;
   17341 
   17342 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17343 		device_xname(dev), __func__));
   17344 
   17345 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17346 		return;
   17347 
   17348 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17349 	count = wm_rar_count(sc);
   17350 	for (i = 0; i < count; i++) {
   17351 		uint16_t lo, hi;
   17352 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17353 		lo = (uint16_t)(mac_reg & 0xffff);
   17354 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17355 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17356 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17357 
   17358 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17359 		lo = (uint16_t)(mac_reg & 0xffff);
   17360 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17361 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17362 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17363 	}
   17364 
   17365 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17366 }
   17367 
   17368 /*
   17369  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17370  *  with 82579 PHY
   17371  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17372  */
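          /*
           * While the Rx path is held disabled through I82579_DFT_CTRL
           * bit 14, the routine programs matching jumbo settings into the
           * MAC (FFLT_DBG, RCTL CRC stripping), the Kumeran interface and
           * the PHY.  Most of the magic constants written below come from
           * the corresponding Intel drivers and are not documented
           * further.
           */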
   17373 static int
   17374 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17375 {
   17376 	device_t dev = sc->sc_dev;
   17377 	int rar_count;
   17378 	int rv;
   17379 	uint32_t mac_reg;
   17380 	uint16_t dft_ctrl, data;
   17381 	uint16_t i;
   17382 
   17383 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17384 		device_xname(dev), __func__));
   17385 
   17386 	if (sc->sc_type < WM_T_PCH2)
   17387 		return 0;
   17388 
   17389 	/* Acquire PHY semaphore */
   17390 	rv = sc->phy.acquire(sc);
   17391 	if (rv != 0)
   17392 		return rv;
   17393 
   17394 	/* Disable Rx path while enabling/disabling workaround */
   17395 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17396 	if (rv != 0)
   17397 		goto out;
   17398 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17399 	    dft_ctrl | (1 << 14));
   17400 	if (rv != 0)
   17401 		goto out;
   17402 
   17403 	if (enable) {
   17404 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17405 		 * SHRAL/H) and initial CRC values to the MAC
   17406 		 */
   17407 		rar_count = wm_rar_count(sc);
   17408 		for (i = 0; i < rar_count; i++) {
   17409 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17410 			uint32_t addr_high, addr_low;
   17411 
   17412 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17413 			if (!(addr_high & RAL_AV))
   17414 				continue;
   17415 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17416 			mac_addr[0] = (addr_low & 0xFF);
   17417 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17418 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17419 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17420 			mac_addr[4] = (addr_high & 0xFF);
   17421 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17422 
   17423 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17424 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17425 		}
   17426 
   17427 		/* Write Rx addresses to the PHY */
   17428 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17429 	}
   17430 
   17431 	/*
   17432 	 * If enable ==
   17433 	 *	true: Enable jumbo frame workaround in the MAC.
   17434 	 *	false: Write MAC register values back to h/w defaults.
   17435 	 */
   17436 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17437 	if (enable) {
   17438 		mac_reg &= ~(1 << 14);
   17439 		mac_reg |= (7 << 15);
   17440 	} else
   17441 		mac_reg &= ~(0xf << 14);
   17442 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17443 
   17444 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17445 	if (enable) {
   17446 		mac_reg |= RCTL_SECRC;
   17447 		sc->sc_rctl |= RCTL_SECRC;
   17448 		sc->sc_flags |= WM_F_CRC_STRIP;
   17449 	} else {
   17450 		mac_reg &= ~RCTL_SECRC;
   17451 		sc->sc_rctl &= ~RCTL_SECRC;
   17452 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17453 	}
   17454 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17455 
   17456 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17457 	if (rv != 0)
   17458 		goto out;
   17459 	if (enable)
   17460 		data |= 1 << 0;
   17461 	else
   17462 		data &= ~(1 << 0);
   17463 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17464 	if (rv != 0)
   17465 		goto out;
   17466 
   17467 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17468 	if (rv != 0)
   17469 		goto out;
   17470 	/*
    17471 	 * XXX FreeBSD and Linux do the same thing here: they write the same
    17472 	 * value in both the enable case and the disable case. Is that correct?
   17473 	 */
   17474 	data &= ~(0xf << 8);
   17475 	data |= (0xb << 8);
   17476 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17477 	if (rv != 0)
   17478 		goto out;
   17479 
   17480 	/*
   17481 	 * If enable ==
   17482 	 *	true: Enable jumbo frame workaround in the PHY.
   17483 	 *	false: Write PHY register values back to h/w defaults.
   17484 	 */
   17485 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17486 	if (rv != 0)
   17487 		goto out;
   17488 	data &= ~(0x7F << 5);
   17489 	if (enable)
   17490 		data |= (0x37 << 5);
   17491 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17492 	if (rv != 0)
   17493 		goto out;
   17494 
   17495 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17496 	if (rv != 0)
   17497 		goto out;
   17498 	if (enable)
   17499 		data &= ~(1 << 13);
   17500 	else
   17501 		data |= (1 << 13);
   17502 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17503 	if (rv != 0)
   17504 		goto out;
   17505 
   17506 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17507 	if (rv != 0)
   17508 		goto out;
   17509 	data &= ~(0x3FF << 2);
   17510 	if (enable)
   17511 		data |= (I82579_TX_PTR_GAP << 2);
   17512 	else
   17513 		data |= (0x8 << 2);
   17514 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17515 	if (rv != 0)
   17516 		goto out;
   17517 
   17518 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17519 	    enable ? 0xf100 : 0x7e00);
   17520 	if (rv != 0)
   17521 		goto out;
   17522 
   17523 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17524 	if (rv != 0)
   17525 		goto out;
   17526 	if (enable)
   17527 		data |= 1 << 10;
   17528 	else
   17529 		data &= ~(1 << 10);
   17530 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17531 	if (rv != 0)
   17532 		goto out;
   17533 
   17534 	/* Re-enable Rx path after enabling/disabling workaround */
   17535 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17536 	    dft_ctrl & ~(1 << 14));
   17537 
   17538 out:
   17539 	sc->phy.release(sc);
   17540 
   17541 	return rv;
   17542 }
   17543 
   17544 /*
    17545  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
   17546  *  done after every PHY reset.
   17547  */
   17548 static int
   17549 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17550 {
   17551 	device_t dev = sc->sc_dev;
   17552 	int rv;
   17553 
   17554 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17555 		device_xname(dev), __func__));
   17556 	KASSERT(sc->sc_type == WM_T_PCH2);
   17557 
   17558 	/* Set MDIO slow mode before any other MDIO access */
   17559 	rv = wm_set_mdio_slow_mode_hv(sc);
   17560 	if (rv != 0)
   17561 		return rv;
   17562 
   17563 	rv = sc->phy.acquire(sc);
   17564 	if (rv != 0)
   17565 		return rv;
    17566 	/* Raise the MSE threshold so the link stays up when noise is high */
   17567 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17568 	if (rv != 0)
   17569 		goto release;
    17570 	/* Drop the link after the MSE threshold has been reached 5 times */
   17571 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17572 release:
   17573 	sc->phy.release(sc);
   17574 
   17575 	return rv;
   17576 }
   17577 
   17578 /**
   17579  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17580  *  @link: link up bool flag
   17581  *
    17582  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
    17583  *  indications, preventing further DMA write requests.  Work around the
    17584  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   17585  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17586  *  speeds in order to avoid Tx hangs.
   17587  **/
   17588 static int
   17589 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17590 {
   17591 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17592 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17593 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17594 	uint16_t phyreg;
   17595 
   17596 	if (link && (speed == STATUS_SPEED_1000)) {
   17597 		int rv;
   17598 
   17599 		rv = sc->phy.acquire(sc);
   17600 		if (rv != 0)
   17601 			return rv;
   17602 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17603 		    &phyreg);
   17604 		if (rv != 0)
   17605 			goto release;
   17606 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17607 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17608 		if (rv != 0)
   17609 			goto release;
   17610 		delay(20);
   17611 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17612 
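          		/*
          		 * With the PLL clock request now forced on, restore the
          		 * saved K1 configuration.
          		 */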
    17613 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    17614 		    phyreg);
   17615 release:
   17616 		sc->phy.release(sc);
   17617 		return rv;
   17618 	}
   17619 
   17620 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17621 
   17622 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17623 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17624 	    || !link
   17625 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17626 		goto update_fextnvm6;
   17627 
   17628 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17629 
   17630 	/* Clear link status transmit timeout */
   17631 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17632 	if (speed == STATUS_SPEED_100) {
   17633 		/* Set inband Tx timeout to 5x10us for 100Half */
   17634 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17635 
   17636 		/* Do not extend the K1 entry latency for 100Half */
   17637 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17638 	} else {
   17639 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17640 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17641 
   17642 		/* Extend the K1 entry latency for 10 Mbps */
   17643 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17644 	}
   17645 
   17646 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17647 
   17648 update_fextnvm6:
   17649 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17650 	return 0;
   17651 }
   17652 
   17653 /*
   17654  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17655  *  @sc:   pointer to the HW structure
   17656  *  @link: link up bool flag
   17657  *
   17658  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   17659  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
    17660  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17661  *  in the NVM.
   17662  */
   17663 static int
   17664 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17665 {
   17666 	int k1_enable = sc->sc_nvm_k1_enabled;
   17667 	int rv;
   17668 
   17669 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17670 		device_xname(sc->sc_dev), __func__));
   17671 
   17672 	rv = sc->phy.acquire(sc);
   17673 	if (rv != 0)
   17674 		return rv;
   17675 
   17676 	if (link) {
   17677 		k1_enable = 0;
   17678 
   17679 		/* Link stall fix for link up */
   17680 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17681 		    0x0100);
   17682 	} else {
   17683 		/* Link stall fix for link down */
   17684 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17685 		    0x4100);
   17686 	}
   17687 
   17688 	wm_configure_k1_ich8lan(sc, k1_enable);
   17689 	sc->phy.release(sc);
   17690 
   17691 	return 0;
   17692 }
   17693 
   17694 /*
   17695  *  wm_k1_workaround_lv - K1 Si workaround
   17696  *  @sc:   pointer to the HW structure
   17697  *
    17698  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps mode.
    17699  *  Disable K1 for 1000 and 100 speeds.
   17700  */
   17701 static int
   17702 wm_k1_workaround_lv(struct wm_softc *sc)
   17703 {
   17704 	uint32_t reg;
   17705 	uint16_t phyreg;
   17706 	int rv;
   17707 
   17708 	if (sc->sc_type != WM_T_PCH2)
   17709 		return 0;
   17710 
   17711 	/* Set K1 beacon duration based on 10Mbps speed */
   17712 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17713 	if (rv != 0)
   17714 		return rv;
   17715 
   17716 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17717 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17718 		if (phyreg &
   17719 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17720 			/* LV 1G/100 packet drop issue workaround */
   17721 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17722 			    &phyreg);
   17723 			if (rv != 0)
   17724 				return rv;
   17725 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17726 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17727 			    phyreg);
   17728 			if (rv != 0)
   17729 				return rv;
   17730 		} else {
   17731 			/* For 10Mbps */
   17732 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17733 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17734 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17735 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17736 		}
   17737 	}
   17738 
   17739 	return 0;
   17740 }
   17741 
   17742 /*
   17743  *  wm_link_stall_workaround_hv - Si workaround
   17744  *  @sc: pointer to the HW structure
   17745  *
   17746  *  This function works around a Si bug where the link partner can get
   17747  *  a link up indication before the PHY does. If small packets are sent
   17748  *  by the link partner they can be placed in the packet buffer without
    17749  *  by the link partner, they can be placed in the packet buffer without
    17750  *  being properly accounted for by the PHY and will stall, preventing
   17751  *  packet buffer after the PHY detects link up.
   17752  */
   17753 static int
   17754 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17755 {
   17756 	uint16_t phyreg;
   17757 
   17758 	if (sc->sc_phytype != WMPHY_82578)
   17759 		return 0;
   17760 
    17761 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17762 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17763 	if ((phyreg & BMCR_LOOP) != 0)
   17764 		return 0;
   17765 
   17766 	/* Check if link is up and at 1Gbps */
   17767 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17768 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17769 	    | BM_CS_STATUS_SPEED_MASK;
   17770 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17771 		| BM_CS_STATUS_SPEED_1000))
   17772 		return 0;
   17773 
   17774 	delay(200 * 1000);	/* XXX too big */
   17775 
   17776 	/* Flush the packets in the fifo buffer */
   17777 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17778 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17779 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17780 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17781 
   17782 	return 0;
   17783 }
   17784 
   17785 static int
   17786 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17787 {
   17788 	int rv;
   17789 
   17790 	rv = sc->phy.acquire(sc);
   17791 	if (rv != 0) {
   17792 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17793 		    __func__);
   17794 		return rv;
   17795 	}
   17796 
   17797 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17798 
   17799 	sc->phy.release(sc);
   17800 
   17801 	return rv;
   17802 }
   17803 
   17804 static int
   17805 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17806 {
   17807 	int rv;
   17808 	uint16_t reg;
   17809 
   17810 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17811 	if (rv != 0)
   17812 		return rv;
   17813 
   17814 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17815 	    reg | HV_KMRN_MDIO_SLOW);
   17816 }
   17817 
   17818 /*
   17819  *  wm_configure_k1_ich8lan - Configure K1 power state
   17820  *  @sc: pointer to the HW structure
   17821  *  @enable: K1 state to configure
   17822  *
   17823  *  Configure the K1 power state based on the provided parameter.
   17824  *  Assumes semaphore already acquired.
   17825  */
   17826 static void
   17827 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17828 {
   17829 	uint32_t ctrl, ctrl_ext, tmp;
   17830 	uint16_t kmreg;
   17831 	int rv;
   17832 
   17833 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17834 
   17835 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17836 	if (rv != 0)
   17837 		return;
   17838 
   17839 	if (k1_enable)
   17840 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17841 	else
   17842 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17843 
   17844 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17845 	if (rv != 0)
   17846 		return;
   17847 
   17848 	delay(20);
   17849 
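          	/*
          	 * Briefly force the MAC speed with the speed-select bypass
          	 * set, presumably so the new K1 setting takes effect
          	 * immediately, then restore the original CTRL/CTRL_EXT
          	 * values.
          	 */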
   17850 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17851 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17852 
   17853 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17854 	tmp |= CTRL_FRCSPD;
   17855 
   17856 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17857 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17858 	CSR_WRITE_FLUSH(sc);
   17859 	delay(20);
   17860 
   17861 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17862 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17863 	CSR_WRITE_FLUSH(sc);
   17864 	delay(20);
   17865 
   17866 	return;
   17867 }
   17868 
   17869 /* special case - for 82575 - need to do manual init ... */
   17870 static void
   17871 wm_reset_init_script_82575(struct wm_softc *sc)
   17872 {
   17873 	/*
    17874 	 * Remark: this is untested code - we have no board without EEPROM.
    17875 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17876 	 */
   17877 
   17878 	/* SerDes configuration via SERDESCTRL */
   17879 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17880 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17881 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17882 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17883 
   17884 	/* CCM configuration via CCMCTL register */
   17885 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17886 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17887 
   17888 	/* PCIe lanes configuration */
   17889 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17890 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17891 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17892 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17893 
   17894 	/* PCIe PLL Configuration */
   17895 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17896 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17898 }
   17899 
   17900 static void
   17901 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17902 {
   17903 	uint32_t reg;
   17904 	uint16_t nvmword;
   17905 	int rv;
   17906 
   17907 	if (sc->sc_type != WM_T_82580)
   17908 		return;
   17909 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17910 		return;
   17911 
   17912 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17913 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17914 	if (rv != 0) {
   17915 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17916 		    __func__);
   17917 		return;
   17918 	}
   17919 
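          	/*
          	 * Re-apply the NVM's MDIO routing for this port: the
          	 * EXT_MDIO bit selects the external MDIO interface as the
          	 * destination and COM_MDIO marks the interface as shared
          	 * between functions.
          	 */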
   17920 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17921 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17922 		reg |= MDICNFG_DEST;
   17923 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17924 		reg |= MDICNFG_COM_MDIO;
   17925 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17926 }
   17927 
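          /* A PHY ID of all zeros or all ones means the PHY did not respond. */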
   17928 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17929 
   17930 static bool
   17931 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17932 {
   17933 	uint32_t reg;
   17934 	uint16_t id1, id2;
   17935 	int i, rv;
   17936 
   17937 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17938 		device_xname(sc->sc_dev), __func__));
   17939 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17940 
   17941 	id1 = id2 = 0xffff;
   17942 	for (i = 0; i < 2; i++) {
   17943 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17944 		    &id1);
   17945 		if ((rv != 0) || MII_INVALIDID(id1))
   17946 			continue;
   17947 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17948 		    &id2);
   17949 		if ((rv != 0) || MII_INVALIDID(id2))
   17950 			continue;
   17951 		break;
   17952 	}
   17953 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17954 		goto out;
   17955 
   17956 	/*
   17957 	 * In case the PHY needs to be in mdio slow mode,
   17958 	 * set slow mode and try to get the PHY id again.
   17959 	 */
   17960 	rv = 0;
   17961 	if (sc->sc_type < WM_T_PCH_LPT) {
   17962 		wm_set_mdio_slow_mode_hv_locked(sc);
   17963 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17964 		    &id1);
   17965 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17966 		    &id2);
   17967 	}
   17968 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17969 		device_printf(sc->sc_dev, "XXX return with false\n");
   17970 		return false;
   17971 	}
   17972 out:
   17973 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17974 		/* Only unforce SMBus if ME is not active */
   17975 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17976 			uint16_t phyreg;
   17977 
   17978 			/* Unforce SMBus mode in PHY */
   17979 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17980 			    CV_SMB_CTRL, &phyreg);
   17981 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17982 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17983 			    CV_SMB_CTRL, phyreg);
   17984 
   17985 			/* Unforce SMBus mode in MAC */
   17986 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17987 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17988 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17989 		}
   17990 	}
   17991 	return true;
   17992 }
   17993 
   17994 static void
   17995 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17996 {
   17997 	uint32_t reg;
   17998 	int i;
   17999 
   18000 	/* Set PHY Config Counter to 50msec */
   18001 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   18002 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   18003 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   18004 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   18005 
   18006 	/* Toggle LANPHYPC */
   18007 	reg = CSR_READ(sc, WMREG_CTRL);
   18008 	reg |= CTRL_LANPHYPC_OVERRIDE;
   18009 	reg &= ~CTRL_LANPHYPC_VALUE;
   18010 	CSR_WRITE(sc, WMREG_CTRL, reg);
   18011 	CSR_WRITE_FLUSH(sc);
   18012 	delay(1000);
   18013 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   18014 	CSR_WRITE(sc, WMREG_CTRL, reg);
   18015 	CSR_WRITE_FLUSH(sc);
   18016 
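          	/*
          	 * Older parts simply sleep 50ms below; PCH_LPT and newer
          	 * instead poll, for up to roughly 100ms, for what appears to
          	 * be the power-cycle-done indication (LPCD) before a final
          	 * settling delay.
          	 */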
   18017 	if (sc->sc_type < WM_T_PCH_LPT)
   18018 		delay(50 * 1000);
   18019 	else {
   18020 		i = 20;
   18021 
   18022 		do {
   18023 			delay(5 * 1000);
   18024 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   18025 		    && i--);
   18026 
   18027 		delay(30 * 1000);
   18028 	}
   18029 }
   18030 
   18031 static int
   18032 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   18033 {
   18034 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   18035 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   18036 	uint32_t rxa;
   18037 	uint16_t scale = 0, lat_enc = 0;
   18038 	int32_t obff_hwm = 0;
   18039 	int64_t lat_ns, value;
   18040 
   18041 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18042 		device_xname(sc->sc_dev), __func__));
   18043 
   18044 	if (link) {
   18045 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   18046 		uint32_t status;
   18047 		uint16_t speed;
   18048 		pcireg_t preg;
   18049 
   18050 		status = CSR_READ(sc, WMREG_STATUS);
   18051 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   18052 		case STATUS_SPEED_10:
   18053 			speed = 10;
   18054 			break;
   18055 		case STATUS_SPEED_100:
   18056 			speed = 100;
   18057 			break;
   18058 		case STATUS_SPEED_1000:
   18059 			speed = 1000;
   18060 			break;
   18061 		default:
   18062 			device_printf(sc->sc_dev, "Unknown speed "
   18063 			    "(status = %08x)\n", status);
   18064 			return -1;
   18065 		}
   18066 
   18067 		/* Rx Packet Buffer Allocation size (KB) */
   18068 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   18069 
   18070 		/*
   18071 		 * Determine the maximum latency tolerated by the device.
   18072 		 *
   18073 		 * Per the PCIe spec, the tolerated latencies are encoded as
   18074 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   18075 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   18076 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   18077 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   18078 		 */
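          		/*
          		 * The budget below is the time for line-rate traffic to
          		 * fill the Rx buffer less two maximum-sized frames:
          		 * bytes * 8 * 1000 / (speed in Mb/s) yields nanoseconds.
          		 */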
   18079 		lat_ns = ((int64_t)rxa * 1024 -
   18080 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   18081 			+ ETHER_HDR_LEN))) * 8 * 1000;
   18082 		if (lat_ns < 0)
   18083 			lat_ns = 0;
   18084 		else
   18085 			lat_ns /= speed;
   18086 		value = lat_ns;
   18087 
   18088 		while (value > LTRV_VALUE) {
    18089 			scale++;
   18090 			value = howmany(value, __BIT(5));
   18091 		}
   18092 		if (scale > LTRV_SCALE_MAX) {
   18093 			device_printf(sc->sc_dev,
   18094 			    "Invalid LTR latency scale %d\n", scale);
   18095 			return -1;
   18096 		}
   18097 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
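          		/*
          		 * Worked example with hypothetical numbers: lat_ns =
          		 * 150000 leaves scale = 2 and value = 147 after two
          		 * rounds of howmany(value, 32), encoding 147 * 2^10 ns
          		 * (~150.5us), the smallest representable latency that
          		 * covers the request.
          		 */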
   18098 
   18099 		/* Determine the maximum latency tolerated by the platform */
   18100 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18101 		    WM_PCI_LTR_CAP_LPT);
   18102 		max_snoop = preg & 0xffff;
   18103 		max_nosnoop = preg >> 16;
   18104 
   18105 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   18106 
   18107 		if (lat_enc > max_ltr_enc) {
   18108 			lat_enc = max_ltr_enc;
   18109 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   18110 			    * PCI_LTR_SCALETONS(
   18111 				    __SHIFTOUT(lat_enc,
   18112 					PCI_LTR_MAXSNOOPLAT_SCALE));
   18113 		}
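          		/*
          		 * Convert the (possibly clamped) latency back into
          		 * approximately the kilobytes of buffer it represents
          		 * at this speed; what remains of the Rx allocation
          		 * becomes the OBFF high water mark.
          		 */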
   18114 
   18115 		if (lat_ns) {
   18116 			lat_ns *= speed * 1000;
   18117 			lat_ns /= 8;
   18118 			lat_ns /= 1000000000;
   18119 			obff_hwm = (int32_t)(rxa - lat_ns);
   18120 		}
   18121 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    18122 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    18123 			    "(rxa = %d, lat_ns = %d)\n",
   18124 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   18125 			return -1;
   18126 		}
   18127 	}
   18128 	/* Snoop and No-Snoop latencies the same */
   18129 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   18130 	CSR_WRITE(sc, WMREG_LTRV, reg);
   18131 
   18132 	/* Set OBFF high water mark */
   18133 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   18134 	reg |= obff_hwm;
   18135 	CSR_WRITE(sc, WMREG_SVT, reg);
   18136 
   18137 	/* Enable OBFF */
   18138 	reg = CSR_READ(sc, WMREG_SVCR);
   18139 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   18140 	CSR_WRITE(sc, WMREG_SVCR, reg);
   18141 
   18142 	return 0;
   18143 }
   18144 
   18145 /*
   18146  * I210 Errata 25 and I211 Errata 10
   18147  * Slow System Clock.
   18148  *
    18149  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   18150  */
   18151 static int
   18152 wm_pll_workaround_i210(struct wm_softc *sc)
   18153 {
   18154 	uint32_t mdicnfg, wuc;
   18155 	uint32_t reg;
   18156 	pcireg_t pcireg;
   18157 	uint32_t pmreg;
   18158 	uint16_t nvmword, tmp_nvmword;
   18159 	uint16_t phyval;
   18160 	bool wa_done = false;
   18161 	int i, rv = 0;
   18162 
   18163 	/* Get Power Management cap offset */
   18164 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   18165 	    &pmreg, NULL) == 0)
   18166 		return -1;
   18167 
   18168 	/* Save WUC and MDICNFG registers */
   18169 	wuc = CSR_READ(sc, WMREG_WUC);
   18170 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   18171 
   18172 	reg = mdicnfg & ~MDICNFG_DEST;
   18173 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   18174 
   18175 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   18176 		/*
   18177 		 * The default value of the Initialization Control Word 1
   18178 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   18179 		 */
   18180 		nvmword = INVM_DEFAULT_AL;
   18181 	}
   18182 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   18183 
   18184 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   18185 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   18186 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   18187 
   18188 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   18189 			rv = 0;
   18190 			break; /* OK */
   18191 		} else
   18192 			rv = -1;
   18193 
   18194 		wa_done = true;
   18195 		/* Directly reset the internal PHY */
   18196 		reg = CSR_READ(sc, WMREG_CTRL);
   18197 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   18198 
   18199 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   18200 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   18201 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   18202 
   18203 		CSR_WRITE(sc, WMREG_WUC, 0);
   18204 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   18205 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18206 
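          		/*
          		 * Bounce the function through D3hot and back to D0,
          		 * which should make the autoload word patched into
          		 * EEARBC above take effect and restart the PLL.
          		 */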
   18207 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18208 		    pmreg + PCI_PMCSR);
   18209 		pcireg |= PCI_PMCSR_STATE_D3;
   18210 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18211 		    pmreg + PCI_PMCSR, pcireg);
   18212 		delay(1000);
   18213 		pcireg &= ~PCI_PMCSR_STATE_D3;
   18214 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18215 		    pmreg + PCI_PMCSR, pcireg);
   18216 
   18217 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18218 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18219 
   18220 		/* Restore WUC register */
   18221 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18222 	}
   18223 
   18224 	/* Restore MDICNFG setting */
   18225 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18226 	if (wa_done)
   18227 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18228 	return rv;
   18229 }
   18230 
   18231 static void
   18232 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18233 {
   18234 	uint32_t reg;
   18235 
   18236 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18237 		device_xname(sc->sc_dev), __func__));
   18238 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18239 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP));
   18240 
   18241 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18242 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18243 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18244 
   18245 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18246 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18247 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18248 }
   18249 
   18250 /* Sysctl functions */
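          /*
           * The read-only handlers below snapshot a device register into a
           * local variable and hand it to sysctl_lookup(9); TDH and TDT are
           * the hardware Tx descriptor head and tail pointers of the given
           * queue.
           */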
   18251 static int
   18252 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18253 {
   18254 	struct sysctlnode node = *rnode;
   18255 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18256 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18257 	struct wm_softc *sc = txq->txq_sc;
   18258 	uint32_t reg;
   18259 
   18260 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18261 	node.sysctl_data = &reg;
   18262 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18263 }
   18264 
   18265 static int
   18266 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18267 {
   18268 	struct sysctlnode node = *rnode;
   18269 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18270 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18271 	struct wm_softc *sc = txq->txq_sc;
   18272 	uint32_t reg;
   18273 
   18274 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18275 	node.sysctl_data = &reg;
   18276 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18277 }
   18278 
   18279 #ifdef WM_DEBUG
   18280 static int
   18281 wm_sysctl_debug(SYSCTLFN_ARGS)
   18282 {
   18283 	struct sysctlnode node = *rnode;
   18284 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18285 	uint32_t dflags;
   18286 	int error;
   18287 
   18288 	dflags = sc->sc_debug;
   18289 	node.sysctl_data = &dflags;
   18290 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18291 
   18292 	if (error || newp == NULL)
   18293 		return error;
   18294 
   18295 	sc->sc_debug = dflags;
   18296 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18297 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18298 
   18299 	return 0;
   18300 }
   18301 #endif
   18302