/*	$NetBSD: if_wm.c,v 1.792 2023/11/21 23:09:40 gutteridge Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.792 2023/11/21 23:09:40 gutteridge Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	(WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |     \
	WM_DEBUG_LOCK)
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
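
/*
 * Usage sketch (illustrative, not code from this file): the printf
 * arguments must be wrapped in their own parentheses, because DPRINTF()
 * passes them through as a single macro argument:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */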

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain needs more than 64
 * DMA segments, m_defrag() is called to compact it; a sketch of that
 * fallback follows the definitions below.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
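
/*
 * A minimal sketch of the m_defrag() fallback described above (not the
 * driver's actual code; error handling is simplified and the names are
 * illustrative).  bus_dmamap_load_mbuf(9) fails with EFBIG when the
 * chain needs more segments than the map allows, so the chain is
 * compacted and the load retried once:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
 *	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		struct mbuf *m = m_defrag(m0, M_NOWAIT);
 *		if (m == NULL)
 *			goto drop;
 *		m0 = m;
 *		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
 *		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *	}
 */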

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.  A
 * worked example follows the definitions below.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
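
/*
 * Worked example for the Rx sizing above (a sketch; the 9014-byte frame
 * length is an assumed jumbo size, not a constant from this driver, and
 * MCLBYTES is 2048 on most ports):
 *
 *	buffers per jumbo packet = howmany(9014, MCLBYTES)
 *				 = howmany(9014, 2048)	    = 5
 *	jumbo packets per ring	 = WM_NRXDESC / 5 = 256 / 5 ~= 51
 *
 * which is where the "room for 50 jumbo packets" figure comes from.
 */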

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
/*
 * Note: the string literal in the sizeof() below is only a length
 * template for the formatted name ("qname" + 2-digit queue number +
 * "evname"); it is never printed.
 */
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
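
/*
 * Illustrative expansion of the macros above (a sketch): inside
 * struct wm_txqueue,
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw)
 *
 * declares "char txq_txdw_evcnt_name[...]" and "struct evcnt txq_ev_txdw".
 * At attach time,
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, device_xname(sc->sc_dev));
 *
 * formats the name "txq00txdw" into the buffer and registers the counter
 * with evcnt_attach_dynamic(9); WM_Q_EVCNT_DETACH() undoes this.
 */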

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq intermediates between them without
	 * blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for the previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index into the MSI-X table */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	u_int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * Interrupt cookies.
					 * Legacy and MSI use sc_ihs[0] only;
					 * MSI-X uses sc_ihs[0] to
					 * sc_ihs[nintrs-1].
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * Legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 * sc_intrs[nintrs-1].
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared between the "Intr. Cause" register and
	 * the non-"Intr. Cause" register.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */
	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
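
/*
 * Usage sketch: the tail pointer lets the Rx path append each buffer of
 * a multi-buffer packet in O(1).  With two illustrative mbufs m1 and m2:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, rxq_tail == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */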

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)		\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	__nothing
#define	WM_EVCNT_STORE(ev, val)	__nothing
#define	WM_EVCNT_ADD(ev, val)	__nothing

#define WM_Q_EVCNT_INCR(qname, evname)		__nothing
#define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
#define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
#endif /* !WM_EVENT_COUNTERS */
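
/*
 * Note on the __HAVE_ATOMIC64_LOADSTORE variants above: the update is a
 * plain read-modify-write built from relaxed atomic load/store, not an
 * atomic increment.  It only guarantees that a concurrent reader (e.g.
 * vmstat -e) never sees a torn 64-bit value, which is sufficient as
 * long as each counter has a single writer at a time (updates happen
 * under the per-queue or core lock).  A queue counter is bumped as,
 * e.g.,
 *
 *	WM_Q_EVCNT_INCR(txq, txdw);
 *
 * which expands to WM_EVCNT_INCR(&(txq)->txq_ev_txdw).
 */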

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
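
/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out to the chip by doing a
 * harmless read of the STATUS register.  A common idiom (a sketch; the
 * exact register and bit depend on the MAC type):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *
 * ensures the reset write has reached the device before the driver
 * starts waiting for the reset to complete.
 */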

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
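
/*
 * Example (a sketch): when a queue's descriptor base address is
 * programmed, the 64-bit bus address is split into the two 32-bit
 * halves these macros produce; on platforms with a 32-bit bus_addr_t
 * the high half is simply 0:
 *
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
 *
 * (the TDBAH/TDBAL register macros and their per-queue indexing are
 * assumptions here; see wm_init_tx_regs() for the real sequence).
 */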

/*
 * Register read/write functions,
 * other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
   1295 	  WM_T_82546_3,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1298 	  "Intel i82541EI 1000BASE-T Ethernet",
   1299 	  WM_T_82541,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1303 	  WM_T_82541,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1307 	  WM_T_82541,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1310 	  "Intel i82541ER 1000BASE-T Ethernet",
   1311 	  WM_T_82541_2,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1314 	  "Intel i82541GI 1000BASE-T Ethernet",
   1315 	  WM_T_82541_2,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1319 	  WM_T_82541_2,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1322 	  "Intel i82541PI 1000BASE-T Ethernet",
   1323 	  WM_T_82541_2,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1326 	  "Intel i82547EI 1000BASE-T Ethernet",
   1327 	  WM_T_82547,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1331 	  WM_T_82547,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1334 	  "Intel i82547GI 1000BASE-T Ethernet",
   1335 	  WM_T_82547_2,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1338 	  "Intel PRO/1000 PT (82571EB)",
   1339 	  WM_T_82571,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1342 	  "Intel PRO/1000 PF (82571EB)",
   1343 	  WM_T_82571,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1346 	  "Intel PRO/1000 PB (82571EB)",
   1347 	  WM_T_82571,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1350 	  "Intel PRO/1000 QT (82571EB)",
   1351 	  WM_T_82571,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1355 	  WM_T_82571,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1359 	  WM_T_82571,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1363 	  WM_T_82571,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82571,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1371 	  WM_T_82571,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1374 	  "Intel i82572EI 1000baseT Ethernet",
   1375 	  WM_T_82572,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1378 	  "Intel i82572EI 1000baseX Ethernet",
   1379 	  WM_T_82572,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1383 	  WM_T_82572,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1386 	  "Intel i82572EI 1000baseT Ethernet",
   1387 	  WM_T_82572,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1390 	  "Intel i82573E",
   1391 	  WM_T_82573,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1394 	  "Intel i82573E IAMT",
   1395 	  WM_T_82573,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1398 	  "Intel i82573L Gigabit Ethernet",
   1399 	  WM_T_82573,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1402 	  "Intel i82574L",
   1403 	  WM_T_82574,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1406 	  "Intel i82574L",
   1407 	  WM_T_82574,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1410 	  "Intel i82583V",
   1411 	  WM_T_82583,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1414 	  "i80003 dual 1000baseT Ethernet",
   1415 	  WM_T_80003,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1418 	  "i80003 dual 1000baseX Ethernet",
   1419 	  WM_T_80003,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1423 	  WM_T_80003,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1426 	  "Intel i80003 1000baseT Ethernet",
   1427 	  WM_T_80003,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_80003,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1434 	  "Intel i82801H (M_AMT) LAN Controller",
   1435 	  WM_T_ICH8,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1437 	  "Intel i82801H (AMT) LAN Controller",
   1438 	  WM_T_ICH8,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1440 	  "Intel i82801H LAN Controller",
   1441 	  WM_T_ICH8,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1444 	  WM_T_ICH8,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1446 	  "Intel i82801H (M) LAN Controller",
   1447 	  WM_T_ICH8,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1450 	  WM_T_ICH8,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1453 	  WM_T_ICH8,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1455 	  "82567V-3 LAN Controller",
   1456 	  WM_T_ICH8,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1458 	  "82801I (AMT) LAN Controller",
   1459 	  WM_T_ICH9,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1461 	  "82801I 10/100 LAN Controller",
   1462 	  WM_T_ICH9,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1464 	  "82801I (G) 10/100 LAN Controller",
   1465 	  WM_T_ICH9,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1467 	  "82801I (GT) 10/100 LAN Controller",
   1468 	  WM_T_ICH9,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1470 	  "82801I (C) LAN Controller",
   1471 	  WM_T_ICH9,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1473 	  "82801I mobile LAN Controller",
   1474 	  WM_T_ICH9,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1476 	  "82801I mobile (V) LAN Controller",
   1477 	  WM_T_ICH9,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1479 	  "82801I mobile (AMT) LAN Controller",
   1480 	  WM_T_ICH9,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1482 	  "82567LM-4 LAN Controller",
   1483 	  WM_T_ICH9,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1485 	  "82567LM-2 LAN Controller",
   1486 	  WM_T_ICH10,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1488 	  "82567LF-2 LAN Controller",
   1489 	  WM_T_ICH10,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1491 	  "82567LM-3 LAN Controller",
   1492 	  WM_T_ICH10,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1494 	  "82567LF-3 LAN Controller",
   1495 	  WM_T_ICH10,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1497 	  "82567V-2 LAN Controller",
   1498 	  WM_T_ICH10,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1500 	  "82567V-3? LAN Controller",
   1501 	  WM_T_ICH10,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1503 	  "HANKSVILLE LAN Controller",
   1504 	  WM_T_ICH10,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1506 	  "PCH LAN (82577LM) Controller",
   1507 	  WM_T_PCH,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1509 	  "PCH LAN (82577LC) Controller",
   1510 	  WM_T_PCH,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1512 	  "PCH LAN (82578DM) Controller",
   1513 	  WM_T_PCH,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1515 	  "PCH LAN (82578DC) Controller",
   1516 	  WM_T_PCH,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1518 	  "PCH2 LAN (82579LM) Controller",
   1519 	  WM_T_PCH2,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1521 	  "PCH2 LAN (82579V) Controller",
   1522 	  WM_T_PCH2,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1524 	  "82575EB dual-1000baseT Ethernet",
   1525 	  WM_T_82575,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1528 	  WM_T_82575,		WMP_F_SERDES },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1530 	  "82575GB quad-1000baseT Ethernet",
   1531 	  WM_T_82575,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1533 	  "82575GB quad-1000baseT Ethernet (PM)",
   1534 	  WM_T_82575,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1536 	  "82576 1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1539 	  "82576 1000BaseX Ethernet",
   1540 	  WM_T_82576,		WMP_F_FIBER },
   1541 
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1543 	  "82576 gigabit Ethernet (SERDES)",
   1544 	  WM_T_82576,		WMP_F_SERDES },
   1545 
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1547 	  "82576 quad-1000BaseT Ethernet",
   1548 	  WM_T_82576,		WMP_F_COPPER },
   1549 
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1552 	  WM_T_82576,		WMP_F_COPPER },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1555 	  "82576 gigabit Ethernet",
   1556 	  WM_T_82576,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1559 	  "82576 gigabit Ethernet (SERDES)",
   1560 	  WM_T_82576,		WMP_F_SERDES },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1562 	  "82576 quad-gigabit Ethernet (SERDES)",
   1563 	  WM_T_82576,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1566 	  "82580 1000BaseT Ethernet",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1569 	  "82580 1000BaseX Ethernet",
   1570 	  WM_T_82580,		WMP_F_FIBER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1573 	  "82580 1000BaseT Ethernet (SERDES)",
   1574 	  WM_T_82580,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1577 	  "82580 gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1580 	  "82580 dual-1000BaseT Ethernet",
   1581 	  WM_T_82580,		WMP_F_COPPER },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1584 	  "82580 quad-1000BaseX Ethernet",
   1585 	  WM_T_82580,		WMP_F_FIBER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1589 	  WM_T_82580,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1593 	  WM_T_82580,		WMP_F_SERDES },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1596 	  "DH89XXCC 1000BASE-KX Ethernet",
   1597 	  WM_T_82580,		WMP_F_SERDES },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1601 	  WM_T_82580,		WMP_F_SERDES },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1604 	  "I350 Gigabit Network Connection",
   1605 	  WM_T_I350,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1608 	  "I350 Gigabit Fiber Network Connection",
   1609 	  WM_T_I350,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1612 	  "I350 Gigabit Backplane Connection",
   1613 	  WM_T_I350,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1616 	  "I350 Quad Port Gigabit Ethernet",
   1617 	  WM_T_I350,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1620 	  "I350 Gigabit Connection",
   1621 	  WM_T_I350,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1624 	  "I354 Gigabit Ethernet (KX)",
   1625 	  WM_T_I354,		WMP_F_SERDES },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1628 	  "I354 Gigabit Ethernet (SGMII)",
   1629 	  WM_T_I354,		WMP_F_COPPER },
   1630 
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1632 	  "I354 Gigabit Ethernet (2.5G)",
   1633 	  WM_T_I354,		WMP_F_COPPER },
   1634 
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1636 	  "I210-T1 Ethernet Server Adapter",
   1637 	  WM_T_I210,		WMP_F_COPPER },
   1638 
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1640 	  "I210 Ethernet (Copper OEM)",
   1641 	  WM_T_I210,		WMP_F_COPPER },
   1642 
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1644 	  "I210 Ethernet (Copper IT)",
   1645 	  WM_T_I210,		WMP_F_COPPER },
   1646 
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1648 	  "I210 Ethernet (Copper, FLASH less)",
   1649 	  WM_T_I210,		WMP_F_COPPER },
   1650 
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1652 	  "I210 Gigabit Ethernet (Fiber)",
   1653 	  WM_T_I210,		WMP_F_FIBER },
   1654 
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1656 	  "I210 Gigabit Ethernet (SERDES)",
   1657 	  WM_T_I210,		WMP_F_SERDES },
   1658 
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1661 	  WM_T_I210,		WMP_F_SERDES },
   1662 
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1664 	  "I210 Gigabit Ethernet (SGMII)",
   1665 	  WM_T_I210,		WMP_F_COPPER },
   1666 
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1669 	  WM_T_I210,		WMP_F_COPPER },
   1670 
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1672 	  "I211 Ethernet (COPPER)",
   1673 	  WM_T_I211,		WMP_F_COPPER },
   1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1675 	  "I217 V Ethernet Connection",
   1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1678 	  "I217 LM Ethernet Connection",
   1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1681 	  "I218 V Ethernet Connection",
   1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1684 	  "I218 V Ethernet Connection",
   1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1687 	  "I218 V Ethernet Connection",
   1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1690 	  "I218 LM Ethernet Connection",
   1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1693 	  "I218 LM Ethernet Connection",
   1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1696 	  "I218 LM Ethernet Connection",
   1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1699 	  "I219 LM Ethernet Connection",
   1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1702 	  "I219 LM (2) Ethernet Connection",
   1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1705 	  "I219 LM (3) Ethernet Connection",
   1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1708 	  "I219 LM (4) Ethernet Connection",
   1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1711 	  "I219 LM (5) Ethernet Connection",
   1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1714 	  "I219 LM (6) Ethernet Connection",
   1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1717 	  "I219 LM (7) Ethernet Connection",
   1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1720 	  "I219 LM (8) Ethernet Connection",
   1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1723 	  "I219 LM (9) Ethernet Connection",
   1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1726 	  "I219 LM (10) Ethernet Connection",
   1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1729 	  "I219 LM (11) Ethernet Connection",
   1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1732 	  "I219 LM (12) Ethernet Connection",
   1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1735 	  "I219 LM (13) Ethernet Connection",
   1736 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1738 	  "I219 LM (14) Ethernet Connection",
   1739 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1741 	  "I219 LM (15) Ethernet Connection",
   1742 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1744 	  "I219 LM (16) Ethernet Connection",
   1745 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1747 	  "I219 LM (17) Ethernet Connection",
   1748 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1750 	  "I219 LM (18) Ethernet Connection",
   1751 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1753 	  "I219 LM (19) Ethernet Connection",
   1754 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM22,
   1756 	  "I219 LM (22) Ethernet Connection",
   1757 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM23,
   1759 	  "I219 LM (23) Ethernet Connection",
   1760 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1762 	  "I219 V Ethernet Connection",
   1763 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1765 	  "I219 V (2) Ethernet Connection",
   1766 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1768 	  "I219 V (4) Ethernet Connection",
   1769 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1771 	  "I219 V (5) Ethernet Connection",
   1772 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1774 	  "I219 V (6) Ethernet Connection",
   1775 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1777 	  "I219 V (7) Ethernet Connection",
   1778 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1780 	  "I219 V (8) Ethernet Connection",
   1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1783 	  "I219 V (9) Ethernet Connection",
   1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1786 	  "I219 V (10) Ethernet Connection",
   1787 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1789 	  "I219 V (11) Ethernet Connection",
   1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1792 	  "I219 V (12) Ethernet Connection",
   1793 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1795 	  "I219 V (13) Ethernet Connection",
   1796 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1798 	  "I219 V (14) Ethernet Connection",
   1799 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1801 	  "I219 V (15) Ethernet Connection",
   1802 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1804 	  "I219 V (16) Ethernet Connection",
   1805 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1807 	  "I219 V (17) Ethernet Connection",
   1808 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1810 	  "I219 V (18) Ethernet Connection",
   1811 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1813 	  "I219 V (19) Ethernet Connection",
   1814 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V22,
   1816 	  "I219 V (22) Ethernet Connection",
   1817 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V23,
   1819 	  "I219 V (23) Ethernet Connection",
   1820 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1821 	{ 0,			0,
   1822 	  NULL,
   1823 	  0,			0 },
   1824 };
   1825 
   1826 /*
    1827  * Register read/write functions,
    1828  * other than CSR_{READ|WRITE}().
   1829  */
   1830 
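         /*
          * In addition to memory-mapped CSR access, i82544 and later
          * controllers expose an indirect I/O window: the register offset
          * is written at I/O BAR offset 0 and the data is then transferred
          * through offset 4.  The two helpers below implement that
          * two-step sequence.
          */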
   1831 #if 0 /* Not currently used */
   1832 static inline uint32_t
   1833 wm_io_read(struct wm_softc *sc, int reg)
   1834 {
   1835 
   1836 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1837 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1838 }
   1839 #endif
   1840 
   1841 static inline void
   1842 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1843 {
   1844 
   1845 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1846 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1847 }
   1848 
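         /*
          * 82575-style controllers drive certain 8-bit registers through
          * a command register with the SCTL_CTL_* layout: the data byte
          * and target address are packed into a single write, and the
          * ready bit is then polled (every 5us, for up to
          * SCTL_CTL_POLL_TIMEOUT iterations) to confirm that the hardware
          * latched the value.
          */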
   1849 static inline void
   1850 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1851     uint32_t data)
   1852 {
   1853 	uint32_t regval;
   1854 	int i;
   1855 
   1856 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1857 
   1858 	CSR_WRITE(sc, reg, regval);
   1859 
   1860 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1861 		delay(5);
   1862 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1863 			break;
   1864 	}
   1865 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1866 		aprint_error("%s: WARNING:"
   1867 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1868 		    device_xname(sc->sc_dev), reg);
   1869 	}
   1870 }
   1871 
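         /*
          * Legacy ("wiseman") descriptors carry DMA addresses as two
          * little-endian 32-bit halves, so wm_set_dma_addr() splits a
          * bus_addr_t accordingly and works for both 32-bit and 64-bit
          * DMA maps.
          */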
   1872 static inline void
   1873 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1874 {
   1875 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1876 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1877 }
   1878 
   1879 /*
   1880  * Descriptor sync/init functions.
   1881  */
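         /*
          * wm_cdtxsync() hides the ring wrap-around from its callers.
          * A worked example, assuming a 256-descriptor ring: syncing
          * num = 4 descriptors from start = 254 becomes two
          * bus_dmamap_sync() calls, one covering descriptors 254-255
          * and one covering descriptors 0-1.
          */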
   1882 static inline void
   1883 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1884 {
   1885 	struct wm_softc *sc = txq->txq_sc;
   1886 
   1887 	/* If it will wrap around, sync to the end of the ring. */
   1888 	if ((start + num) > WM_NTXDESC(txq)) {
   1889 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1890 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1891 		    (WM_NTXDESC(txq) - start), ops);
   1892 		num -= (WM_NTXDESC(txq) - start);
   1893 		start = 0;
   1894 	}
   1895 
   1896 	/* Now sync whatever is left. */
   1897 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1898 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1899 }
   1900 
   1901 static inline void
   1902 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1903 {
   1904 	struct wm_softc *sc = rxq->rxq_sc;
   1905 
   1906 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1907 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1908 }
   1909 
   1910 static inline void
   1911 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1912 {
   1913 	struct wm_softc *sc = rxq->rxq_sc;
   1914 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1915 	struct mbuf *m = rxs->rxs_mbuf;
   1916 
   1917 	/*
   1918 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1919 	 * so that the payload after the Ethernet header is aligned
   1920 	 * to a 4-byte boundary.
    1921 	 *
   1922 	 * XXX BRAINDAMAGE ALERT!
   1923 	 * The stupid chip uses the same size for every buffer, which
   1924 	 * is set in the Receive Control register.  We are using the 2K
   1925 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1926 	 * reason, we can't "scoot" packets longer than the standard
   1927 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1928 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1929 	 * the upper layer copy the headers.
   1930 	 */
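         	/*
         	 * An illustrative example: a 1518-byte maximum frame plus
         	 * the 2-byte tweak is well under 2K, so sc_align_tweak is 2;
         	 * with jumbo frames the total would exceed (2K - 2), so on
         	 * strict-alignment platforms the tweak is 0 instead.
         	 */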
   1931 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1932 
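         	/*
         	 * Three receive descriptor layouts are initialized here:
         	 * the 82574's extended format, the "new queue" format for
         	 * WM_F_NEWQUEUE (82575 and later) chips, and the original
         	 * wiseman format for everything else.
         	 */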
   1933 	if (sc->sc_type == WM_T_82574) {
   1934 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1935 		rxd->erx_data.erxd_addr =
   1936 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1937 		rxd->erx_data.erxd_dd = 0;
   1938 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1939 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1940 
   1941 		rxd->nqrx_data.nrxd_paddr =
   1942 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1943 		/* Currently, split header is not supported. */
   1944 		rxd->nqrx_data.nrxd_haddr = 0;
   1945 	} else {
   1946 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1947 
   1948 		wm_set_dma_addr(&rxd->wrx_addr,
   1949 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1950 		rxd->wrx_len = 0;
   1951 		rxd->wrx_cksum = 0;
   1952 		rxd->wrx_status = 0;
   1953 		rxd->wrx_errors = 0;
   1954 		rxd->wrx_special = 0;
   1955 	}
   1956 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1957 
   1958 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1959 }
   1960 
   1961 /*
   1962  * Device driver interface functions and commonly used functions.
   1963  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1964  */
   1965 
    1966 /* Look up the device in the supported-device table */
   1967 static const struct wm_product *
   1968 wm_lookup(const struct pci_attach_args *pa)
   1969 {
   1970 	const struct wm_product *wmp;
   1971 
   1972 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1973 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1974 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1975 			return wmp;
   1976 	}
   1977 	return NULL;
   1978 }
   1979 
   1980 /* The match function (ca_match) */
   1981 static int
   1982 wm_match(device_t parent, cfdata_t cf, void *aux)
   1983 {
   1984 	struct pci_attach_args *pa = aux;
   1985 
   1986 	if (wm_lookup(pa) != NULL)
   1987 		return 1;
   1988 
   1989 	return 0;
   1990 }
   1991 
   1992 /* The attach function (ca_attach) */
   1993 static void
   1994 wm_attach(device_t parent, device_t self, void *aux)
   1995 {
   1996 	struct wm_softc *sc = device_private(self);
   1997 	struct pci_attach_args *pa = aux;
   1998 	prop_dictionary_t dict;
   1999 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2000 	pci_chipset_tag_t pc = pa->pa_pc;
   2001 	int counts[PCI_INTR_TYPE_SIZE];
   2002 	pci_intr_type_t max_type;
   2003 	const char *eetype, *xname;
   2004 	bus_space_tag_t memt;
   2005 	bus_space_handle_t memh;
   2006 	bus_size_t memsize;
   2007 	int memh_valid;
   2008 	int i, error;
   2009 	const struct wm_product *wmp;
   2010 	prop_data_t ea;
   2011 	prop_number_t pn;
   2012 	uint8_t enaddr[ETHER_ADDR_LEN];
   2013 	char buf[256];
   2014 	char wqname[MAXCOMLEN];
   2015 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2016 	pcireg_t preg, memtype;
   2017 	uint16_t eeprom_data, apme_mask;
   2018 	bool force_clear_smbi;
   2019 	uint32_t link_mode;
   2020 	uint32_t reg;
   2021 
   2022 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2023 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2024 #endif
   2025 	sc->sc_dev = self;
   2026 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2027 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2028 	sc->sc_core_stopping = false;
   2029 
   2030 	wmp = wm_lookup(pa);
   2031 #ifdef DIAGNOSTIC
   2032 	if (wmp == NULL) {
   2033 		printf("\n");
   2034 		panic("wm_attach: impossible");
   2035 	}
   2036 #endif
   2037 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2038 
   2039 	sc->sc_pc = pa->pa_pc;
   2040 	sc->sc_pcitag = pa->pa_tag;
   2041 
   2042 	if (pci_dma64_available(pa)) {
   2043 		aprint_verbose(", 64-bit DMA");
   2044 		sc->sc_dmat = pa->pa_dmat64;
   2045 	} else {
   2046 		aprint_verbose(", 32-bit DMA");
   2047 		sc->sc_dmat = pa->pa_dmat;
   2048 	}
   2049 
   2050 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2051 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2052 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2053 
   2054 	sc->sc_type = wmp->wmp_type;
   2055 
   2056 	/* Set default function pointers */
   2057 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2058 	sc->phy.release = sc->nvm.release = wm_put_null;
   2059 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2060 
   2061 	if (sc->sc_type < WM_T_82543) {
   2062 		if (sc->sc_rev < 2) {
   2063 			aprint_error_dev(sc->sc_dev,
   2064 			    "i82542 must be at least rev. 2\n");
   2065 			return;
   2066 		}
   2067 		if (sc->sc_rev < 3)
   2068 			sc->sc_type = WM_T_82542_2_0;
   2069 	}
   2070 
   2071 	/*
   2072 	 * Disable MSI for Errata:
   2073 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2074 	 *
   2075 	 *  82544: Errata 25
   2076 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2077 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2078 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2079 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2080 	 *
   2081 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2082 	 *
   2083 	 *  82571 & 82572: Errata 63
   2084 	 */
   2085 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2086 	    || (sc->sc_type == WM_T_82572))
   2087 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2088 
   2089 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2090 	    || (sc->sc_type == WM_T_82580)
   2091 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2092 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2093 		sc->sc_flags |= WM_F_NEWQUEUE;
   2094 
   2095 	/* Set device properties (mactype) */
   2096 	dict = device_properties(sc->sc_dev);
   2097 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2098 
   2099 	/*
    2100 	 * Map the device.  All devices support memory-mapped access,
   2101 	 * and it is really required for normal operation.
   2102 	 */
   2103 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2104 	switch (memtype) {
   2105 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2106 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2107 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2108 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2109 		break;
   2110 	default:
   2111 		memh_valid = 0;
   2112 		break;
   2113 	}
   2114 
   2115 	if (memh_valid) {
   2116 		sc->sc_st = memt;
   2117 		sc->sc_sh = memh;
   2118 		sc->sc_ss = memsize;
   2119 	} else {
   2120 		aprint_error_dev(sc->sc_dev,
   2121 		    "unable to map device registers\n");
   2122 		return;
   2123 	}
   2124 
   2125 	/*
   2126 	 * In addition, i82544 and later support I/O mapped indirect
   2127 	 * register access.  It is not desirable (nor supported in
   2128 	 * this driver) to use it for normal operation, though it is
   2129 	 * required to work around bugs in some chip versions.
   2130 	 */
   2131 	switch (sc->sc_type) {
   2132 	case WM_T_82544:
   2133 	case WM_T_82541:
   2134 	case WM_T_82541_2:
   2135 	case WM_T_82547:
   2136 	case WM_T_82547_2:
   2137 		/* First we have to find the I/O BAR. */
   2138 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2139 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2140 			if (memtype == PCI_MAPREG_TYPE_IO)
   2141 				break;
   2142 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2143 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2144 				i += 4;	/* skip high bits, too */
   2145 		}
   2146 		if (i < PCI_MAPREG_END) {
   2147 			/*
    2148 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2149 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2150 			 * That's no problem, because the newer chips don't
    2151 			 * have this bug.
    2152 			 *
    2153 			 * The i8254x apparently doesn't respond when the
    2154 			 * I/O BAR is 0, which looks somewhat as if it has
    2155 			 * not been configured.
   2156 			 */
   2157 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2158 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2159 				aprint_error_dev(sc->sc_dev,
   2160 				    "WARNING: I/O BAR at zero.\n");
   2161 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2162 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2163 			    == 0) {
   2164 				sc->sc_flags |= WM_F_IOH_VALID;
   2165 			} else
   2166 				aprint_error_dev(sc->sc_dev,
   2167 				    "WARNING: unable to map I/O space\n");
   2168 		}
   2169 		break;
   2170 	default:
   2171 		break;
   2172 	}
   2173 
   2174 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2175 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2176 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2177 	if (sc->sc_type < WM_T_82542_2_1)
   2178 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2179 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2180 
   2181 	/* Power up chip */
   2182 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2183 	    && error != EOPNOTSUPP) {
   2184 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2185 		return;
   2186 	}
   2187 
   2188 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2189 	/*
    2190 	 * Don't use MSI-X if we can use only one queue, to save
    2191 	 * interrupt resources.
   2192 	 */
   2193 	if (sc->sc_nqueues > 1) {
   2194 		max_type = PCI_INTR_TYPE_MSIX;
   2195 		/*
    2196 		 * The 82583 has an MSI-X capability in the PCI configuration
    2197 		 * space but doesn't actually support it; at least the
    2198 		 * documentation doesn't say anything about MSI-X.
   2199 		 */
   2200 		counts[PCI_INTR_TYPE_MSIX]
   2201 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2202 	} else {
   2203 		max_type = PCI_INTR_TYPE_MSI;
   2204 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2205 	}
   2206 
   2207 	/* Allocation settings */
   2208 	counts[PCI_INTR_TYPE_MSI] = 1;
   2209 	counts[PCI_INTR_TYPE_INTX] = 1;
   2210 	/* overridden by disable flags */
   2211 	if (wm_disable_msi != 0) {
   2212 		counts[PCI_INTR_TYPE_MSI] = 0;
   2213 		if (wm_disable_msix != 0) {
   2214 			max_type = PCI_INTR_TYPE_INTX;
   2215 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2216 		}
   2217 	} else if (wm_disable_msix != 0) {
   2218 		max_type = PCI_INTR_TYPE_MSI;
   2219 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2220 	}
   2221 
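         	/*
         	 * The allocation falls back in descending order: if MSI-X
         	 * setup fails, the vectors are released and the allocation
         	 * is retried with MSI; if that fails too, a final retry
         	 * uses INTx.
         	 */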
   2222 alloc_retry:
   2223 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2224 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2225 		return;
   2226 	}
   2227 
   2228 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2229 		error = wm_setup_msix(sc);
   2230 		if (error) {
   2231 			pci_intr_release(pc, sc->sc_intrs,
   2232 			    counts[PCI_INTR_TYPE_MSIX]);
   2233 
   2234 			/* Setup for MSI: Disable MSI-X */
   2235 			max_type = PCI_INTR_TYPE_MSI;
   2236 			counts[PCI_INTR_TYPE_MSI] = 1;
   2237 			counts[PCI_INTR_TYPE_INTX] = 1;
   2238 			goto alloc_retry;
   2239 		}
   2240 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2241 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2242 		error = wm_setup_legacy(sc);
   2243 		if (error) {
   2244 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2245 			    counts[PCI_INTR_TYPE_MSI]);
   2246 
   2247 			/* The next try is for INTx: Disable MSI */
   2248 			max_type = PCI_INTR_TYPE_INTX;
   2249 			counts[PCI_INTR_TYPE_INTX] = 1;
   2250 			goto alloc_retry;
   2251 		}
   2252 	} else {
   2253 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2254 		error = wm_setup_legacy(sc);
   2255 		if (error) {
   2256 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2257 			    counts[PCI_INTR_TYPE_INTX]);
   2258 			return;
   2259 		}
   2260 	}
   2261 
   2262 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2263 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2264 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2265 	    WQ_PERCPU | WQ_MPSAFE);
   2266 	if (error) {
   2267 		aprint_error_dev(sc->sc_dev,
   2268 		    "unable to create TxRx workqueue\n");
   2269 		goto out;
   2270 	}
   2271 
   2272 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2273 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2274 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2275 	    WQ_MPSAFE);
   2276 	if (error) {
   2277 		workqueue_destroy(sc->sc_queue_wq);
   2278 		aprint_error_dev(sc->sc_dev,
   2279 		    "unable to create reset workqueue\n");
   2280 		goto out;
   2281 	}
   2282 
   2283 	/*
   2284 	 * Check the function ID (unit number of the chip).
   2285 	 */
   2286 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2287 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2288 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2289 	    || (sc->sc_type == WM_T_82580)
   2290 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2291 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2292 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2293 	else
   2294 		sc->sc_funcid = 0;
   2295 
   2296 	/*
   2297 	 * Determine a few things about the bus we're connected to.
   2298 	 */
   2299 	if (sc->sc_type < WM_T_82543) {
   2300 		/* We don't really know the bus characteristics here. */
   2301 		sc->sc_bus_speed = 33;
   2302 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2303 		/*
    2304 		 * CSA (Communication Streaming Architecture) is about as fast
    2305 		 * as a 32-bit 66MHz PCI bus.
   2306 		 */
   2307 		sc->sc_flags |= WM_F_CSA;
   2308 		sc->sc_bus_speed = 66;
   2309 		aprint_verbose_dev(sc->sc_dev,
   2310 		    "Communication Streaming Architecture\n");
   2311 		if (sc->sc_type == WM_T_82547) {
   2312 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2313 			callout_setfunc(&sc->sc_txfifo_ch,
   2314 			    wm_82547_txfifo_stall, sc);
   2315 			aprint_verbose_dev(sc->sc_dev,
   2316 			    "using 82547 Tx FIFO stall work-around\n");
   2317 		}
   2318 	} else if (sc->sc_type >= WM_T_82571) {
   2319 		sc->sc_flags |= WM_F_PCIE;
   2320 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2321 		    && (sc->sc_type != WM_T_ICH10)
   2322 		    && (sc->sc_type != WM_T_PCH)
   2323 		    && (sc->sc_type != WM_T_PCH2)
   2324 		    && (sc->sc_type != WM_T_PCH_LPT)
   2325 		    && (sc->sc_type != WM_T_PCH_SPT)
   2326 		    && (sc->sc_type != WM_T_PCH_CNP)
   2327 		    && (sc->sc_type != WM_T_PCH_TGP)) {
   2328 			/* ICH* and PCH* have no PCIe capability registers */
   2329 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2330 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2331 				NULL) == 0)
   2332 				aprint_error_dev(sc->sc_dev,
   2333 				    "unable to find PCIe capability\n");
   2334 		}
   2335 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2336 	} else {
   2337 		reg = CSR_READ(sc, WMREG_STATUS);
   2338 		if (reg & STATUS_BUS64)
   2339 			sc->sc_flags |= WM_F_BUS64;
   2340 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2341 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2342 
   2343 			sc->sc_flags |= WM_F_PCIX;
   2344 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2345 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2346 				aprint_error_dev(sc->sc_dev,
   2347 				    "unable to find PCIX capability\n");
   2348 			else if (sc->sc_type != WM_T_82545_3 &&
   2349 			    sc->sc_type != WM_T_82546_3) {
   2350 				/*
   2351 				 * Work around a problem caused by the BIOS
   2352 				 * setting the max memory read byte count
   2353 				 * incorrectly.
   2354 				 */
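         				/*
         				 * Both fields encode a power of
         				 * two, 512 << n bytes (e.g. n == 2
         				 * is 2048), so if the commanded
         				 * count exceeds the device's
         				 * reported maximum, clamp it down.
         				 */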
   2355 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2356 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2357 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2358 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2359 
   2360 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2361 				    PCIX_CMD_BYTECNT_SHIFT;
   2362 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2363 				    PCIX_STATUS_MAXB_SHIFT;
   2364 				if (bytecnt > maxb) {
   2365 					aprint_verbose_dev(sc->sc_dev,
   2366 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2367 					    512 << bytecnt, 512 << maxb);
   2368 					pcix_cmd = (pcix_cmd &
   2369 					    ~PCIX_CMD_BYTECNT_MASK) |
   2370 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2371 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2372 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2373 					    pcix_cmd);
   2374 				}
   2375 			}
   2376 		}
   2377 		/*
   2378 		 * The quad port adapter is special; it has a PCIX-PCIX
   2379 		 * bridge on the board, and can run the secondary bus at
   2380 		 * a higher speed.
   2381 		 */
   2382 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2383 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2384 								      : 66;
   2385 		} else if (sc->sc_flags & WM_F_PCIX) {
   2386 			switch (reg & STATUS_PCIXSPD_MASK) {
   2387 			case STATUS_PCIXSPD_50_66:
   2388 				sc->sc_bus_speed = 66;
   2389 				break;
   2390 			case STATUS_PCIXSPD_66_100:
   2391 				sc->sc_bus_speed = 100;
   2392 				break;
   2393 			case STATUS_PCIXSPD_100_133:
   2394 				sc->sc_bus_speed = 133;
   2395 				break;
   2396 			default:
   2397 				aprint_error_dev(sc->sc_dev,
   2398 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2399 				    reg & STATUS_PCIXSPD_MASK);
   2400 				sc->sc_bus_speed = 66;
   2401 				break;
   2402 			}
   2403 		} else
   2404 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2405 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2406 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2407 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2408 	}
   2409 
   2410 	/* clear interesting stat counters */
   2411 	CSR_READ(sc, WMREG_COLC);
   2412 	CSR_READ(sc, WMREG_RXERRC);
   2413 
   2414 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2415 	    || (sc->sc_type >= WM_T_ICH8))
   2416 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2417 	if (sc->sc_type >= WM_T_ICH8)
   2418 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2419 
   2420 	/* Set PHY, NVM mutex related stuff */
   2421 	switch (sc->sc_type) {
   2422 	case WM_T_82542_2_0:
   2423 	case WM_T_82542_2_1:
   2424 	case WM_T_82543:
   2425 	case WM_T_82544:
   2426 		/* Microwire */
   2427 		sc->nvm.read = wm_nvm_read_uwire;
   2428 		sc->sc_nvm_wordsize = 64;
   2429 		sc->sc_nvm_addrbits = 6;
   2430 		break;
   2431 	case WM_T_82540:
   2432 	case WM_T_82545:
   2433 	case WM_T_82545_3:
   2434 	case WM_T_82546:
   2435 	case WM_T_82546_3:
   2436 		/* Microwire */
   2437 		sc->nvm.read = wm_nvm_read_uwire;
   2438 		reg = CSR_READ(sc, WMREG_EECD);
   2439 		if (reg & EECD_EE_SIZE) {
   2440 			sc->sc_nvm_wordsize = 256;
   2441 			sc->sc_nvm_addrbits = 8;
   2442 		} else {
   2443 			sc->sc_nvm_wordsize = 64;
   2444 			sc->sc_nvm_addrbits = 6;
   2445 		}
   2446 		sc->sc_flags |= WM_F_LOCK_EECD;
   2447 		sc->nvm.acquire = wm_get_eecd;
   2448 		sc->nvm.release = wm_put_eecd;
   2449 		break;
   2450 	case WM_T_82541:
   2451 	case WM_T_82541_2:
   2452 	case WM_T_82547:
   2453 	case WM_T_82547_2:
   2454 		reg = CSR_READ(sc, WMREG_EECD);
   2455 		/*
    2456 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2457 		 * 8254[17], so set the flags and functions before calling it.
   2458 		 */
   2459 		sc->sc_flags |= WM_F_LOCK_EECD;
   2460 		sc->nvm.acquire = wm_get_eecd;
   2461 		sc->nvm.release = wm_put_eecd;
   2462 		if (reg & EECD_EE_TYPE) {
   2463 			/* SPI */
   2464 			sc->nvm.read = wm_nvm_read_spi;
   2465 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2466 			wm_nvm_set_addrbits_size_eecd(sc);
   2467 		} else {
   2468 			/* Microwire */
   2469 			sc->nvm.read = wm_nvm_read_uwire;
   2470 			if ((reg & EECD_EE_ABITS) != 0) {
   2471 				sc->sc_nvm_wordsize = 256;
   2472 				sc->sc_nvm_addrbits = 8;
   2473 			} else {
   2474 				sc->sc_nvm_wordsize = 64;
   2475 				sc->sc_nvm_addrbits = 6;
   2476 			}
   2477 		}
   2478 		break;
   2479 	case WM_T_82571:
   2480 	case WM_T_82572:
   2481 		/* SPI */
   2482 		sc->nvm.read = wm_nvm_read_eerd;
    2483 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2484 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2485 		wm_nvm_set_addrbits_size_eecd(sc);
   2486 		sc->phy.acquire = wm_get_swsm_semaphore;
   2487 		sc->phy.release = wm_put_swsm_semaphore;
   2488 		sc->nvm.acquire = wm_get_nvm_82571;
   2489 		sc->nvm.release = wm_put_nvm_82571;
   2490 		break;
   2491 	case WM_T_82573:
   2492 	case WM_T_82574:
   2493 	case WM_T_82583:
   2494 		sc->nvm.read = wm_nvm_read_eerd;
    2495 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2496 		if (sc->sc_type == WM_T_82573) {
   2497 			sc->phy.acquire = wm_get_swsm_semaphore;
   2498 			sc->phy.release = wm_put_swsm_semaphore;
   2499 			sc->nvm.acquire = wm_get_nvm_82571;
   2500 			sc->nvm.release = wm_put_nvm_82571;
   2501 		} else {
   2502 			/* Both PHY and NVM use the same semaphore. */
   2503 			sc->phy.acquire = sc->nvm.acquire
   2504 			    = wm_get_swfwhw_semaphore;
   2505 			sc->phy.release = sc->nvm.release
   2506 			    = wm_put_swfwhw_semaphore;
   2507 		}
   2508 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2509 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2510 			sc->sc_nvm_wordsize = 2048;
   2511 		} else {
   2512 			/* SPI */
   2513 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2514 			wm_nvm_set_addrbits_size_eecd(sc);
   2515 		}
   2516 		break;
   2517 	case WM_T_82575:
   2518 	case WM_T_82576:
   2519 	case WM_T_82580:
   2520 	case WM_T_I350:
   2521 	case WM_T_I354:
   2522 	case WM_T_80003:
   2523 		/* SPI */
   2524 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2525 		wm_nvm_set_addrbits_size_eecd(sc);
   2526 		if ((sc->sc_type == WM_T_80003)
   2527 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2528 			sc->nvm.read = wm_nvm_read_eerd;
   2529 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2530 		} else {
   2531 			sc->nvm.read = wm_nvm_read_spi;
   2532 			sc->sc_flags |= WM_F_LOCK_EECD;
   2533 		}
   2534 		sc->phy.acquire = wm_get_phy_82575;
   2535 		sc->phy.release = wm_put_phy_82575;
   2536 		sc->nvm.acquire = wm_get_nvm_80003;
   2537 		sc->nvm.release = wm_put_nvm_80003;
   2538 		break;
   2539 	case WM_T_ICH8:
   2540 	case WM_T_ICH9:
   2541 	case WM_T_ICH10:
   2542 	case WM_T_PCH:
   2543 	case WM_T_PCH2:
   2544 	case WM_T_PCH_LPT:
   2545 		sc->nvm.read = wm_nvm_read_ich8;
   2546 		/* FLASH */
   2547 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2548 		sc->sc_nvm_wordsize = 2048;
   2549 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2550 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2551 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2552 			aprint_error_dev(sc->sc_dev,
   2553 			    "can't map FLASH registers\n");
   2554 			goto out;
   2555 		}
   2556 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
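         		/*
         		 * GFPREG gives the flash region bounds in
         		 * ICH_FLASH_SECTOR_SIZE units: the base in the low
         		 * bits and the limit in bits 16 and up.  The region
         		 * spans (limit + 1 - base) sectors; it's converted to
         		 * bytes and then divided between the two NVM banks,
         		 * expressed in 16-bit words.
         		 */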
   2557 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2558 		    ICH_FLASH_SECTOR_SIZE;
   2559 		sc->sc_ich8_flash_bank_size =
   2560 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2561 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2562 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2563 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2564 		sc->sc_flashreg_offset = 0;
   2565 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2566 		sc->phy.release = wm_put_swflag_ich8lan;
   2567 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2568 		sc->nvm.release = wm_put_nvm_ich8lan;
   2569 		break;
   2570 	case WM_T_PCH_SPT:
   2571 	case WM_T_PCH_CNP:
   2572 	case WM_T_PCH_TGP:
   2573 		sc->nvm.read = wm_nvm_read_spt;
   2574 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2575 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2576 		sc->sc_flasht = sc->sc_st;
   2577 		sc->sc_flashh = sc->sc_sh;
   2578 		sc->sc_ich8_flash_base = 0;
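         		/*
         		 * A sketch of the math: bits 5:1 of the STRAP register
         		 * encode the flash size as (field + 1) *
         		 * NVM_SIZE_MULTIPLIER bytes.
         		 */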
   2579 		sc->sc_nvm_wordsize =
   2580 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2581 		    * NVM_SIZE_MULTIPLIER;
    2582 		/* It is the size in bytes; we want words */
   2583 		sc->sc_nvm_wordsize /= 2;
   2584 		/* Assume 2 banks */
   2585 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2586 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2587 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2588 		sc->phy.release = wm_put_swflag_ich8lan;
   2589 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2590 		sc->nvm.release = wm_put_nvm_ich8lan;
   2591 		break;
   2592 	case WM_T_I210:
   2593 	case WM_T_I211:
    2594 		/* Allow one clear of the SW semaphore on I210 and newer */
   2595 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2596 		if (wm_nvm_flash_presence_i210(sc)) {
   2597 			sc->nvm.read = wm_nvm_read_eerd;
   2598 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2599 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2600 			wm_nvm_set_addrbits_size_eecd(sc);
   2601 		} else {
   2602 			sc->nvm.read = wm_nvm_read_invm;
   2603 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2604 			sc->sc_nvm_wordsize = INVM_SIZE;
   2605 		}
   2606 		sc->phy.acquire = wm_get_phy_82575;
   2607 		sc->phy.release = wm_put_phy_82575;
   2608 		sc->nvm.acquire = wm_get_nvm_80003;
   2609 		sc->nvm.release = wm_put_nvm_80003;
   2610 		break;
   2611 	default:
   2612 		break;
   2613 	}
   2614 
   2615 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2616 	switch (sc->sc_type) {
   2617 	case WM_T_82571:
   2618 	case WM_T_82572:
   2619 		reg = CSR_READ(sc, WMREG_SWSM2);
   2620 		if ((reg & SWSM2_LOCK) == 0) {
   2621 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2622 			force_clear_smbi = true;
   2623 		} else
   2624 			force_clear_smbi = false;
   2625 		break;
   2626 	case WM_T_82573:
   2627 	case WM_T_82574:
   2628 	case WM_T_82583:
   2629 		force_clear_smbi = true;
   2630 		break;
   2631 	default:
   2632 		force_clear_smbi = false;
   2633 		break;
   2634 	}
   2635 	if (force_clear_smbi) {
   2636 		reg = CSR_READ(sc, WMREG_SWSM);
   2637 		if ((reg & SWSM_SMBI) != 0)
   2638 			aprint_error_dev(sc->sc_dev,
   2639 			    "Please update the Bootagent\n");
   2640 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2641 	}
   2642 
    2643 	/*
    2644 	 * Defer printing the EEPROM type until after verifying the checksum.
    2645 	 * This allows the EEPROM type to be printed correctly in the case
    2646 	 * that no EEPROM is attached.
    2647 	 */
   2648 	/*
   2649 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2650 	 * this for later, so we can fail future reads from the EEPROM.
   2651 	 */
   2652 	if (wm_nvm_validate_checksum(sc)) {
    2653 		/*
    2654 		 * Retry once more; some PCI-e parts fail the first
    2655 		 * check because the link may still be in a sleep state.
    2656 		 */
   2657 		if (wm_nvm_validate_checksum(sc))
   2658 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2659 	}
   2660 
   2661 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2662 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2663 	else {
   2664 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2665 		    sc->sc_nvm_wordsize);
   2666 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2667 			aprint_verbose("iNVM");
   2668 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2669 			aprint_verbose("FLASH(HW)");
   2670 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2671 			aprint_verbose("FLASH");
   2672 		else {
   2673 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2674 				eetype = "SPI";
   2675 			else
   2676 				eetype = "MicroWire";
   2677 			aprint_verbose("(%d address bits) %s EEPROM",
   2678 			    sc->sc_nvm_addrbits, eetype);
   2679 		}
   2680 	}
   2681 	wm_nvm_version(sc);
   2682 	aprint_verbose("\n");
   2683 
    2684 	/*
    2685 	 * XXX This is the first call of wm_gmii_setup_phytype; the result
    2686 	 * might be incorrect.
    2687 	 */
   2688 	wm_gmii_setup_phytype(sc, 0, 0);
   2689 
   2690 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2691 	switch (sc->sc_type) {
   2692 	case WM_T_ICH8:
   2693 	case WM_T_ICH9:
   2694 	case WM_T_ICH10:
   2695 	case WM_T_PCH:
   2696 	case WM_T_PCH2:
   2697 	case WM_T_PCH_LPT:
   2698 	case WM_T_PCH_SPT:
   2699 	case WM_T_PCH_CNP:
   2700 	case WM_T_PCH_TGP:
   2701 		apme_mask = WUC_APME;
   2702 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2703 		if ((eeprom_data & apme_mask) != 0)
   2704 			sc->sc_flags |= WM_F_WOL;
   2705 		break;
   2706 	default:
   2707 		break;
   2708 	}
   2709 
   2710 	/* Reset the chip to a known state. */
   2711 	wm_reset(sc);
   2712 
   2713 	/*
   2714 	 * Check for I21[01] PLL workaround.
   2715 	 *
   2716 	 * Three cases:
   2717 	 * a) Chip is I211.
   2718 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2719 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2720 	 */
   2721 	if (sc->sc_type == WM_T_I211)
   2722 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2723 	if (sc->sc_type == WM_T_I210) {
   2724 		if (!wm_nvm_flash_presence_i210(sc))
   2725 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2726 		else if ((sc->sc_nvm_ver_major < 3)
   2727 		    || ((sc->sc_nvm_ver_major == 3)
   2728 			&& (sc->sc_nvm_ver_minor < 25))) {
   2729 			aprint_verbose_dev(sc->sc_dev,
   2730 			    "ROM image version %d.%d is older than 3.25\n",
   2731 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2732 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2733 		}
   2734 	}
   2735 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2736 		wm_pll_workaround_i210(sc);
   2737 
   2738 	wm_get_wakeup(sc);
   2739 
   2740 	/* Non-AMT based hardware can now take control from firmware */
   2741 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2742 		wm_get_hw_control(sc);
   2743 
    2744 	/*
    2745 	 * Read the Ethernet address from the device properties if present;
    2746 	 * otherwise read it from the EEPROM.
    2747 	 */
   2748 	ea = prop_dictionary_get(dict, "mac-address");
   2749 	if (ea != NULL) {
   2750 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2751 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2752 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2753 	} else {
   2754 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2755 			aprint_error_dev(sc->sc_dev,
   2756 			    "unable to read Ethernet address\n");
   2757 			goto out;
   2758 		}
   2759 	}
   2760 
   2761 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2762 	    ether_sprintf(enaddr));
   2763 
   2764 	/*
   2765 	 * Read the config info from the EEPROM, and set up various
   2766 	 * bits in the control registers based on their contents.
   2767 	 */
   2768 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2769 	if (pn != NULL) {
   2770 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2771 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2772 	} else {
   2773 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2774 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2775 			goto out;
   2776 		}
   2777 	}
   2778 
   2779 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2780 	if (pn != NULL) {
   2781 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2782 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2783 	} else {
   2784 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2785 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2786 			goto out;
   2787 		}
   2788 	}
   2789 
    2790 	/* Check for WM_F_WOL */
   2791 	switch (sc->sc_type) {
   2792 	case WM_T_82542_2_0:
   2793 	case WM_T_82542_2_1:
   2794 	case WM_T_82543:
   2795 		/* dummy? */
   2796 		eeprom_data = 0;
   2797 		apme_mask = NVM_CFG3_APME;
   2798 		break;
   2799 	case WM_T_82544:
   2800 		apme_mask = NVM_CFG2_82544_APM_EN;
   2801 		eeprom_data = cfg2;
   2802 		break;
   2803 	case WM_T_82546:
   2804 	case WM_T_82546_3:
   2805 	case WM_T_82571:
   2806 	case WM_T_82572:
   2807 	case WM_T_82573:
   2808 	case WM_T_82574:
   2809 	case WM_T_82583:
   2810 	case WM_T_80003:
   2811 	case WM_T_82575:
   2812 	case WM_T_82576:
   2813 		apme_mask = NVM_CFG3_APME;
   2814 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2815 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2816 		break;
   2817 	case WM_T_82580:
   2818 	case WM_T_I350:
   2819 	case WM_T_I354:
   2820 	case WM_T_I210:
   2821 	case WM_T_I211:
   2822 		apme_mask = NVM_CFG3_APME;
   2823 		wm_nvm_read(sc,
   2824 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2825 		    1, &eeprom_data);
   2826 		break;
   2827 	case WM_T_ICH8:
   2828 	case WM_T_ICH9:
   2829 	case WM_T_ICH10:
   2830 	case WM_T_PCH:
   2831 	case WM_T_PCH2:
   2832 	case WM_T_PCH_LPT:
   2833 	case WM_T_PCH_SPT:
   2834 	case WM_T_PCH_CNP:
   2835 	case WM_T_PCH_TGP:
    2836 		/* Already checked before wm_reset() */
   2837 		apme_mask = eeprom_data = 0;
   2838 		break;
   2839 	default: /* XXX 82540 */
   2840 		apme_mask = NVM_CFG3_APME;
   2841 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2842 		break;
   2843 	}
    2844 	/* Check for WM_F_WOL after reading the EEPROM settings above */
   2845 	if ((eeprom_data & apme_mask) != 0)
   2846 		sc->sc_flags |= WM_F_WOL;
   2847 
    2848 	/*
    2849 	 * We have the EEPROM settings; now apply the special cases
    2850 	 * where the EEPROM may be wrong or the board doesn't support
    2851 	 * wake-on-LAN on a particular port.
    2852 	 */
   2853 	switch (sc->sc_pcidevid) {
   2854 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2855 		sc->sc_flags &= ~WM_F_WOL;
   2856 		break;
   2857 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2858 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2859 		/* Wake events are only supported on port A of dual-fiber
    2860 		 * adapters, regardless of the EEPROM setting. */
   2861 		if (sc->sc_funcid == 1)
   2862 			sc->sc_flags &= ~WM_F_WOL;
   2863 		break;
   2864 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2865 		/* If quad port adapter, disable WoL on all but port A */
   2866 		if (sc->sc_funcid != 0)
   2867 			sc->sc_flags &= ~WM_F_WOL;
   2868 		break;
   2869 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2870 		/* Wake events are only supported on port A of dual-fiber
    2871 		 * adapters, regardless of the EEPROM setting. */
   2872 		if (sc->sc_funcid == 1)
   2873 			sc->sc_flags &= ~WM_F_WOL;
   2874 		break;
   2875 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2876 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2877 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2878 		/* If quad port adapter, disable WoL on all but port A */
   2879 		if (sc->sc_funcid != 0)
   2880 			sc->sc_flags &= ~WM_F_WOL;
   2881 		break;
   2882 	}
   2883 
   2884 	if (sc->sc_type >= WM_T_82575) {
   2885 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2886 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2887 			    nvmword);
   2888 			if ((sc->sc_type == WM_T_82575) ||
   2889 			    (sc->sc_type == WM_T_82576)) {
   2890 				/* Check NVM for autonegotiation */
   2891 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2892 				    != 0)
   2893 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2894 			}
   2895 			if ((sc->sc_type == WM_T_82575) ||
   2896 			    (sc->sc_type == WM_T_I350)) {
   2897 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2898 					sc->sc_flags |= WM_F_MAS;
   2899 			}
   2900 		}
   2901 	}
   2902 
    2903 	/*
    2904 	 * XXX need special handling for some multiple-port cards
    2905 	 * to disable a particular port.
    2906 	 */
   2907 
   2908 	if (sc->sc_type >= WM_T_82544) {
   2909 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2910 		if (pn != NULL) {
   2911 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2912 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2913 		} else {
   2914 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2915 				aprint_error_dev(sc->sc_dev,
   2916 				    "unable to read SWDPIN\n");
   2917 				goto out;
   2918 			}
   2919 		}
   2920 	}
   2921 
   2922 	if (cfg1 & NVM_CFG1_ILOS)
   2923 		sc->sc_ctrl |= CTRL_ILOS;
   2924 
    2925 	/*
    2926 	 * XXX
    2927 	 * This code isn't correct for newer chips because pins 2 and 3
    2928 	 * are located in different positions there. Check all datasheets.
    2929 	 *
    2930 	 * Until this is resolved, only do it on chips up to the 82580.
    2931 	 */
   2932 	if (sc->sc_type <= WM_T_82580) {
   2933 		if (sc->sc_type >= WM_T_82544) {
   2934 			sc->sc_ctrl |=
   2935 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2936 			    CTRL_SWDPIO_SHIFT;
   2937 			sc->sc_ctrl |=
   2938 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2939 			    CTRL_SWDPINS_SHIFT;
   2940 		} else {
   2941 			sc->sc_ctrl |=
   2942 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2943 			    CTRL_SWDPIO_SHIFT;
   2944 		}
   2945 	}
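         	/*
         	 * A sketch of the packing above (field positions assumed):
         	 * the NVM SWDPIN word carries a 4-bit pin direction field
         	 * (SWDPIO) and a 4-bit pin value field (SWDPIN); each nibble
         	 * is extracted with its NVM_SWDPIN_*_SHIFT and re-inserted
         	 * into CTRL at CTRL_SWDPIO_SHIFT/CTRL_SWDPINS_SHIFT.
         	 */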
   2946 
   2947 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2948 		wm_nvm_read(sc,
   2949 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2950 		    1, &nvmword);
   2951 		if (nvmword & NVM_CFG3_ILOS)
   2952 			sc->sc_ctrl |= CTRL_ILOS;
   2953 	}
   2954 
   2955 #if 0
   2956 	if (sc->sc_type >= WM_T_82544) {
   2957 		if (cfg1 & NVM_CFG1_IPS0)
   2958 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2959 		if (cfg1 & NVM_CFG1_IPS1)
   2960 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2961 		sc->sc_ctrl_ext |=
   2962 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2963 		    CTRL_EXT_SWDPIO_SHIFT;
   2964 		sc->sc_ctrl_ext |=
   2965 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2966 		    CTRL_EXT_SWDPINS_SHIFT;
   2967 	} else {
   2968 		sc->sc_ctrl_ext |=
   2969 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2970 		    CTRL_EXT_SWDPIO_SHIFT;
   2971 	}
   2972 #endif
   2973 
   2974 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2975 #if 0
   2976 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2977 #endif
   2978 
   2979 	if (sc->sc_type == WM_T_PCH) {
   2980 		uint16_t val;
   2981 
   2982 		/* Save the NVM K1 bit setting */
   2983 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2984 
   2985 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2986 			sc->sc_nvm_k1_enabled = 1;
   2987 		else
   2988 			sc->sc_nvm_k1_enabled = 0;
   2989 	}
   2990 
   2991 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2992 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2993 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2994 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2995 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2996 	    || sc->sc_type == WM_T_PCH_TGP
   2997 	    || sc->sc_type == WM_T_82573
   2998 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2999 		/* Copper only */
   3000 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    3001 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    3002 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    3003 	    || (sc->sc_type == WM_T_I211)) {
   3004 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3005 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
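         		/*
         		 * Rough summary of the link-mode decision below:
         		 *   1000KX		-> SERDES
         		 *   SGMII over MDIO	-> copper (WM_F_SGMII)
         		 *   SGMII over I2C	-> ask the SFP, default SGMII
         		 *   PCIE_SERDES	-> ask the SFP, default SERDES
         		 *   GMII or default	-> copper
         		 */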
   3006 		switch (link_mode) {
   3007 		case CTRL_EXT_LINK_MODE_1000KX:
   3008 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   3009 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3010 			break;
   3011 		case CTRL_EXT_LINK_MODE_SGMII:
   3012 			if (wm_sgmii_uses_mdio(sc)) {
   3013 				aprint_normal_dev(sc->sc_dev,
   3014 				    "SGMII(MDIO)\n");
   3015 				sc->sc_flags |= WM_F_SGMII;
   3016 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3017 				break;
   3018 			}
   3019 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3020 			/*FALLTHROUGH*/
   3021 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3022 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3023 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3024 				if (link_mode
   3025 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3026 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3027 					sc->sc_flags |= WM_F_SGMII;
   3028 					aprint_verbose_dev(sc->sc_dev,
   3029 					    "SGMII\n");
   3030 				} else {
   3031 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3032 					aprint_verbose_dev(sc->sc_dev,
   3033 					    "SERDES\n");
   3034 				}
   3035 				break;
   3036 			}
   3037 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3038 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3039 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3040 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3041 				sc->sc_flags |= WM_F_SGMII;
   3042 			}
   3043 			/* Do not change link mode for 100BaseFX */
   3044 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3045 				break;
   3046 
   3047 			/* Change current link mode setting */
   3048 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3049 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3050 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3051 			else
   3052 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3053 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3054 			break;
   3055 		case CTRL_EXT_LINK_MODE_GMII:
   3056 		default:
   3057 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3058 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3059 			break;
   3060 		}
   3061 
    3062 		reg &= ~CTRL_EXT_I2C_ENA;
    3063 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3064 			reg |= CTRL_EXT_I2C_ENA;
   3067 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3068 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3069 			if (!wm_sgmii_uses_mdio(sc))
   3070 				wm_gmii_setup_phytype(sc, 0, 0);
   3071 			wm_reset_mdicnfg_82580(sc);
   3072 		}
   3073 	} else if (sc->sc_type < WM_T_82543 ||
   3074 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3075 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3076 			aprint_error_dev(sc->sc_dev,
   3077 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3078 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3079 		}
   3080 	} else {
   3081 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3082 			aprint_error_dev(sc->sc_dev,
   3083 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3084 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3085 		}
   3086 	}
   3087 
   3088 	if (sc->sc_type >= WM_T_PCH2)
   3089 		sc->sc_flags |= WM_F_EEE;
   3090 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3091 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
    3092 		/* XXX: Need special handling for I354 (not yet implemented). */
   3093 		if (sc->sc_type != WM_T_I354)
   3094 			sc->sc_flags |= WM_F_EEE;
   3095 	}
   3096 
    3097 	/*
    3098 	 * The I350 has a bug where it always strips the CRC whether asked
    3099 	 * to or not, so ask for stripped CRC here and cope in the rx path.
    3100 	 */
   3101 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3102 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3103 		sc->sc_flags |= WM_F_CRC_STRIP;
   3104 
    3105 	/*
    3106 	 * Workaround for some chips to delay sending LINK_STATE_UP.
    3107 	 * Some systems can't send packets immediately after link-up. See
    3108 	 * also wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
    3109 	 */
   3110 	switch (sc->sc_type) {
   3111 	case WM_T_I350:
   3112 	case WM_T_I354:
   3113 	case WM_T_I210:
   3114 	case WM_T_I211:
   3115 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3116 			sc->sc_flags |= WM_F_DELAY_LINKUP;
   3117 		break;
   3118 	default:
   3119 		break;
   3120 	}
   3121 
   3122 	/* Set device properties (macflags) */
   3123 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3124 
   3125 	if (sc->sc_flags != 0) {
   3126 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3127 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3128 	}
   3129 
   3130 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3131 
   3132 	/* Initialize the media structures accordingly. */
   3133 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3134 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3135 	else
   3136 		wm_tbi_mediainit(sc); /* All others */
   3137 
   3138 	ifp = &sc->sc_ethercom.ec_if;
   3139 	xname = device_xname(sc->sc_dev);
   3140 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3141 	ifp->if_softc = sc;
   3142 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3143 	ifp->if_extflags = IFEF_MPSAFE;
   3144 	ifp->if_ioctl = wm_ioctl;
   3145 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3146 		ifp->if_start = wm_nq_start;
    3147 		/*
    3148 		 * When the number of CPUs is one and the controller can use
    3149 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3150 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    3151 		 * other for link status changes.
    3152 		 * In this situation, wm_nq_transmit() is disadvantageous
    3153 		 * because of wm_select_txqueue() and pcq(9) overhead.
    3154 		 */
   3155 		if (wm_is_using_multiqueue(sc))
   3156 			ifp->if_transmit = wm_nq_transmit;
   3157 	} else {
   3158 		ifp->if_start = wm_start;
   3159 		/*
   3160 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3161 		 * described above.
   3162 		 */
   3163 		if (wm_is_using_multiqueue(sc))
   3164 			ifp->if_transmit = wm_transmit;
   3165 	}
    3166 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as watchdog. */
   3167 	ifp->if_init = wm_init;
   3168 	ifp->if_stop = wm_stop;
   3169 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3170 	IFQ_SET_READY(&ifp->if_snd);
   3171 
   3172 	/* Check for jumbo frame */
   3173 	switch (sc->sc_type) {
   3174 	case WM_T_82573:
   3175 		/* XXX limited to 9234 if ASPM is disabled */
   3176 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3177 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3178 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3179 		break;
   3180 	case WM_T_82571:
   3181 	case WM_T_82572:
   3182 	case WM_T_82574:
   3183 	case WM_T_82583:
   3184 	case WM_T_82575:
   3185 	case WM_T_82576:
   3186 	case WM_T_82580:
   3187 	case WM_T_I350:
   3188 	case WM_T_I354:
   3189 	case WM_T_I210:
   3190 	case WM_T_I211:
   3191 	case WM_T_80003:
   3192 	case WM_T_ICH9:
   3193 	case WM_T_ICH10:
   3194 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3195 	case WM_T_PCH_LPT:
   3196 	case WM_T_PCH_SPT:
   3197 	case WM_T_PCH_CNP:
   3198 	case WM_T_PCH_TGP:
   3199 		/* XXX limited to 9234 */
   3200 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3201 		break;
   3202 	case WM_T_PCH:
   3203 		/* XXX limited to 4096 */
   3204 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3205 		break;
   3206 	case WM_T_82542_2_0:
   3207 	case WM_T_82542_2_1:
   3208 	case WM_T_ICH8:
   3209 		/* No support for jumbo frame */
   3210 		break;
   3211 	default:
   3212 		/* ETHER_MAX_LEN_JUMBO */
   3213 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3214 		break;
   3215 	}
   3216 
    3217 	/* If we're an i82543 or greater, we can support VLANs. */
   3218 	if (sc->sc_type >= WM_T_82543) {
   3219 		sc->sc_ethercom.ec_capabilities |=
   3220 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3221 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3222 	}
   3223 
   3224 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3225 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3226 
    3227 	/*
    3228 	 * We can perform TCPv4 and UDPv4 checksumming in hardware.
    3229 	 * Only on i82543 and later.
    3230 	 */
   3231 	if (sc->sc_type >= WM_T_82543) {
   3232 		ifp->if_capabilities |=
   3233 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3234 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3235 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3236 		    IFCAP_CSUM_TCPv6_Tx |
   3237 		    IFCAP_CSUM_UDPv6_Tx;
   3238 	}
   3239 
   3240 	/*
   3241 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3242 	 *
   3243 	 *	82541GI (8086:1076) ... no
   3244 	 *	82572EI (8086:10b9) ... yes
   3245 	 */
   3246 	if (sc->sc_type >= WM_T_82571) {
   3247 		ifp->if_capabilities |=
   3248 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3249 	}
   3250 
    3251 	/*
    3252 	 * If we're an i82544 or greater (except i82547), we can do
    3253 	 * TCP segmentation offload.
    3254 	 */
   3255 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3256 		ifp->if_capabilities |= IFCAP_TSOv4;
   3257 
   3258 	if (sc->sc_type >= WM_T_82571)
   3259 		ifp->if_capabilities |= IFCAP_TSOv6;
   3260 
   3261 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3262 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3263 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3264 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3265 
   3266 	/* Attach the interface. */
   3267 	if_initialize(ifp);
   3268 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3269 	ether_ifattach(ifp, enaddr);
   3270 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3271 	if_register(ifp);
   3272 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3273 	    RND_FLAG_DEFAULT);
   3274 
   3275 #ifdef WM_EVENT_COUNTERS
   3276 	/* Attach event counters. */
   3277 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3278 	    NULL, xname, "linkintr");
   3279 
   3280 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "CRC Error");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Symbol Error");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Missed Packets");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3287 	    NULL, xname, "Collision");
   3288 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3289 	    NULL, xname, "Sequence Error");
   3290 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3291 	    NULL, xname, "Receive Length Error");
   3292 
   3293 	if (sc->sc_type >= WM_T_82543) {
   3294 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3295 		    NULL, xname, "Alignment Error");
   3296 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3297 		    NULL, xname, "Receive Error");
   3298 		/* XXX Does 82575 have HTDPMC? */
   3299 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3300 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3301 			    EVCNT_TYPE_MISC, NULL, xname,
   3302 			    "Carrier Extension Error");
   3303 		else
   3304 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3305 			    EVCNT_TYPE_MISC, NULL, xname,
   3306 			    "Host Transmit Discarded Packets by MAC");
   3307 
   3308 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3309 		    NULL, xname, "Tx with No CRS");
   3310 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3311 		    NULL, xname, "TCP Segmentation Context Tx");
   3312 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3313 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3314 			    EVCNT_TYPE_MISC, NULL, xname,
   3315 			    "TCP Segmentation Context Tx Fail");
   3316 		else {
   3317 			/* XXX Is the circuit breaker only for 82576? */
   3318 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3319 			    EVCNT_TYPE_MISC, NULL, xname,
   3320 			    "Circuit Breaker Rx Dropped Packet");
   3321 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3322 			    EVCNT_TYPE_MISC, NULL, xname,
   3323 			    "Circuit Breaker Rx Manageability Packet");
   3324 		}
   3325 	}
   3326 
   3327 	if (sc->sc_type >= WM_T_82542_2_1) {
   3328 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3329 		    NULL, xname, "XOFF Transmitted");
   3330 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3331 		    NULL, xname, "XON Transmitted");
   3332 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3333 		    NULL, xname, "XOFF Received");
   3334 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3335 		    NULL, xname, "XON Received");
   3336 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3337 		    NULL, xname, "FC Received Unsupported");
   3338 	}
   3339 
   3340 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3341 	    NULL, xname, "Single Collision");
   3342 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3343 	    NULL, xname, "Excessive Collisions");
   3344 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3345 	    NULL, xname, "Multiple Collision");
   3346 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3347 	    NULL, xname, "Late Collisions");
   3348 
   3349 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3350 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3351 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3352 
   3353 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3354 	    NULL, xname, "Defer");
   3355 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3356 	    NULL, xname, "Packets Rx (64 bytes)");
   3357 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3358 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3359 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3360 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3361 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3362 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3363 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3364 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3365 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Good Packets Rx");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Broadcast Packets Rx");
   3371 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3372 	    NULL, xname, "Multicast Packets Rx");
   3373 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3374 	    NULL, xname, "Good Packets Tx");
   3375 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3376 	    NULL, xname, "Good Octets Rx");
   3377 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3378 	    NULL, xname, "Good Octets Tx");
   3379 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Rx No Buffers");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Rx Undersize (valid CRC)");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Rx Fragment (bad CRC)");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Rx Oversize (valid CRC)");
   3387 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3388 	    NULL, xname, "Rx Jabber (bad CRC)");
   3389 	if (sc->sc_type >= WM_T_82540) {
   3390 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3391 		    NULL, xname, "Management Packets RX");
   3392 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3393 		    NULL, xname, "Management Packets Dropped");
   3394 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3395 		    NULL, xname, "Management Packets TX");
   3396 	}
   3397 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3398 	    NULL, xname, "Total Octets Rx");
   3399 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3400 	    NULL, xname, "Total Octets Tx");
   3401 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3402 	    NULL, xname, "Total Packets Rx");
   3403 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3404 	    NULL, xname, "Total Packets Tx");
   3405 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3406 	    NULL, xname, "Packets Tx (64 bytes)");
   3407 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3408 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3409 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3410 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3411 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3412 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3413 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3414 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3415 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3416 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3417 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3418 	    NULL, xname, "Multicast Packets Tx");
   3419 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3420 	    NULL, xname, "Broadcast Packets Tx");
   3421 	if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
   3422 		evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3423 		    NULL, xname, "Interrupt Assertion");
   3424 	if (sc->sc_type < WM_T_82575) {
   3425 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3426 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3427 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3428 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3429 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3430 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3431 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3432 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3433 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3434 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3435 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3436 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3437 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3438 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3439 
   3440 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3441 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3442 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3443 	} else if (!WM_IS_ICHPCH(sc)) {
   3444 		/*
   3445 		 * For 82575 and newer.
   3446 		 *
   3447 		 * On 80003, ICHs and PCHs, it seems all of the following
   3448 		 * registers are zero.
   3449 		 */
   3450 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3451 		    NULL, xname, "Rx Packets To Host");
   3452 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3453 		    NULL, xname, "Debug Counter 1");
   3454 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3455 		    NULL, xname, "Debug Counter 2");
   3456 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3457 		    NULL, xname, "Debug Counter 3");
   3458 
    3459 		/*
    3460 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3461 		 * Empty), but I think that's wrong. The real count I
    3462 		 * observed is the same as GPTC (Good Packets Tx) and TPT
    3463 		 * (Total Packets Tx). It's HGPTC (Host Good Packets Tx),
    3464 		 * which is described in the 82576's datasheet.
    3465 		 */
   3466 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3467 		    NULL, xname, "Host Good Packets TX");
   3468 
   3469 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3470 		    NULL, xname, "Debug Counter 4");
   3471 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3472 		    NULL, xname, "Rx Desc Min Thresh");
   3473 		/* XXX Is the circuit breaker only for 82576? */
   3474 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3475 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3476 
   3477 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3478 		    NULL, xname, "Host Good Octets Rx");
   3479 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3480 		    NULL, xname, "Host Good Octets Tx");
   3481 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3482 		    NULL, xname, "Length Errors (length/type <= 1500)");
   3483 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3484 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3485 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3486 		    NULL, xname, "Header Redirection Missed Packet");
   3487 	}
   3488 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3489 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3490 		    NULL, xname, "EEE Tx LPI");
   3491 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3492 		    NULL, xname, "EEE Rx LPI");
   3493 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3494 		    NULL, xname, "BMC2OS Packets received by host");
   3495 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3496 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3497 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3498 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3499 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3500 		    NULL, xname, "OS2BMC Packets received by BMC");
   3501 	}
   3502 #endif /* WM_EVENT_COUNTERS */
   3503 
   3504 	sc->sc_txrx_use_workqueue = false;
   3505 
   3506 	if (wm_phy_need_linkdown_discard(sc)) {
   3507 		DPRINTF(sc, WM_DEBUG_LINK,
   3508 		    ("%s: %s: Set linkdown discard flag\n",
   3509 			device_xname(sc->sc_dev), __func__));
   3510 		wm_set_linkdown_discard(sc);
   3511 	}
   3512 
   3513 	wm_init_sysctls(sc);
   3514 
   3515 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3516 		pmf_class_network_register(self, ifp);
   3517 	else
   3518 		aprint_error_dev(self, "couldn't establish power handler\n");
   3519 
   3520 	sc->sc_flags |= WM_F_ATTACHED;
   3521 out:
   3522 	return;
   3523 }
   3524 
   3525 /* The detach function (ca_detach) */
   3526 static int
   3527 wm_detach(device_t self, int flags __unused)
   3528 {
   3529 	struct wm_softc *sc = device_private(self);
   3530 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3531 	int i;
   3532 
   3533 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3534 		return 0;
   3535 
   3536 	/* Stop the interface. Callouts are stopped in it. */
   3537 	IFNET_LOCK(ifp);
   3538 	sc->sc_dying = true;
   3539 	wm_stop(ifp, 1);
   3540 	IFNET_UNLOCK(ifp);
   3541 
   3542 	pmf_device_deregister(self);
   3543 
   3544 	sysctl_teardown(&sc->sc_sysctllog);
   3545 
   3546 #ifdef WM_EVENT_COUNTERS
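         	/*
         	 * Note: each conditional evcnt_detach() below must mirror the
         	 * condition used for the corresponding evcnt_attach_dynamic()
         	 * in wm_attach(), or we would detach a counter that was never
         	 * attached.
         	 */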
   3547 	evcnt_detach(&sc->sc_ev_linkintr);
   3548 
   3549 	evcnt_detach(&sc->sc_ev_crcerrs);
   3550 	evcnt_detach(&sc->sc_ev_symerrc);
   3551 	evcnt_detach(&sc->sc_ev_mpc);
   3552 	evcnt_detach(&sc->sc_ev_colc);
   3553 	evcnt_detach(&sc->sc_ev_sec);
   3554 	evcnt_detach(&sc->sc_ev_rlec);
   3555 
   3556 	if (sc->sc_type >= WM_T_82543) {
   3557 		evcnt_detach(&sc->sc_ev_algnerrc);
   3558 		evcnt_detach(&sc->sc_ev_rxerrc);
   3559 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3560 			evcnt_detach(&sc->sc_ev_cexterr);
   3561 		else
   3562 			evcnt_detach(&sc->sc_ev_htdpmc);
   3563 
   3564 		evcnt_detach(&sc->sc_ev_tncrs);
   3565 		evcnt_detach(&sc->sc_ev_tsctc);
   3566 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3567 			evcnt_detach(&sc->sc_ev_tsctfc);
   3568 		else {
   3569 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3570 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3571 		}
   3572 	}
   3573 
   3574 	if (sc->sc_type >= WM_T_82542_2_1) {
   3575 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3576 		evcnt_detach(&sc->sc_ev_tx_xon);
   3577 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3578 		evcnt_detach(&sc->sc_ev_rx_xon);
   3579 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3580 	}
   3581 
   3582 	evcnt_detach(&sc->sc_ev_scc);
   3583 	evcnt_detach(&sc->sc_ev_ecol);
   3584 	evcnt_detach(&sc->sc_ev_mcc);
   3585 	evcnt_detach(&sc->sc_ev_latecol);
   3586 
   3587 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3588 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3589 
   3590 	evcnt_detach(&sc->sc_ev_dc);
   3591 	evcnt_detach(&sc->sc_ev_prc64);
   3592 	evcnt_detach(&sc->sc_ev_prc127);
   3593 	evcnt_detach(&sc->sc_ev_prc255);
   3594 	evcnt_detach(&sc->sc_ev_prc511);
   3595 	evcnt_detach(&sc->sc_ev_prc1023);
   3596 	evcnt_detach(&sc->sc_ev_prc1522);
   3597 	evcnt_detach(&sc->sc_ev_gprc);
   3598 	evcnt_detach(&sc->sc_ev_bprc);
   3599 	evcnt_detach(&sc->sc_ev_mprc);
   3600 	evcnt_detach(&sc->sc_ev_gptc);
   3601 	evcnt_detach(&sc->sc_ev_gorc);
   3602 	evcnt_detach(&sc->sc_ev_gotc);
   3603 	evcnt_detach(&sc->sc_ev_rnbc);
   3604 	evcnt_detach(&sc->sc_ev_ruc);
   3605 	evcnt_detach(&sc->sc_ev_rfc);
   3606 	evcnt_detach(&sc->sc_ev_roc);
   3607 	evcnt_detach(&sc->sc_ev_rjc);
   3608 	if (sc->sc_type >= WM_T_82540) {
   3609 		evcnt_detach(&sc->sc_ev_mgtprc);
   3610 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3611 		evcnt_detach(&sc->sc_ev_mgtptc);
   3612 	}
   3613 	evcnt_detach(&sc->sc_ev_tor);
   3614 	evcnt_detach(&sc->sc_ev_tot);
   3615 	evcnt_detach(&sc->sc_ev_tpr);
   3616 	evcnt_detach(&sc->sc_ev_tpt);
   3617 	evcnt_detach(&sc->sc_ev_ptc64);
   3618 	evcnt_detach(&sc->sc_ev_ptc127);
   3619 	evcnt_detach(&sc->sc_ev_ptc255);
   3620 	evcnt_detach(&sc->sc_ev_ptc511);
   3621 	evcnt_detach(&sc->sc_ev_ptc1023);
   3622 	evcnt_detach(&sc->sc_ev_ptc1522);
   3623 	evcnt_detach(&sc->sc_ev_mptc);
   3624 	evcnt_detach(&sc->sc_ev_bptc);
   3625 	if (sc->sc_type >= WM_T_82571)
   3626 		evcnt_detach(&sc->sc_ev_iac);
   3627 	if (sc->sc_type < WM_T_82575) {
   3628 		evcnt_detach(&sc->sc_ev_icrxptc);
   3629 		evcnt_detach(&sc->sc_ev_icrxatc);
   3630 		evcnt_detach(&sc->sc_ev_ictxptc);
   3631 		evcnt_detach(&sc->sc_ev_ictxatc);
   3632 		evcnt_detach(&sc->sc_ev_ictxqec);
   3633 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3634 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3635 		evcnt_detach(&sc->sc_ev_icrxoc);
   3636 	} else if (!WM_IS_ICHPCH(sc)) {
   3637 		evcnt_detach(&sc->sc_ev_rpthc);
   3638 		evcnt_detach(&sc->sc_ev_debug1);
   3639 		evcnt_detach(&sc->sc_ev_debug2);
   3640 		evcnt_detach(&sc->sc_ev_debug3);
   3641 		evcnt_detach(&sc->sc_ev_hgptc);
   3642 		evcnt_detach(&sc->sc_ev_debug4);
   3643 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3644 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3645 
   3646 		evcnt_detach(&sc->sc_ev_hgorc);
   3647 		evcnt_detach(&sc->sc_ev_hgotc);
   3648 		evcnt_detach(&sc->sc_ev_lenerrs);
   3649 		evcnt_detach(&sc->sc_ev_scvpc);
   3650 		evcnt_detach(&sc->sc_ev_hrmpc);
   3651 	}
   3652 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3653 		evcnt_detach(&sc->sc_ev_tlpic);
   3654 		evcnt_detach(&sc->sc_ev_rlpic);
   3655 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3656 		evcnt_detach(&sc->sc_ev_o2bspc);
   3657 		evcnt_detach(&sc->sc_ev_b2ospc);
   3658 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3659 	}
   3660 #endif /* WM_EVENT_COUNTERS */
   3661 
   3662 	rnd_detach_source(&sc->rnd_source);
   3663 
   3664 	/* Tell the firmware about the release */
   3665 	mutex_enter(sc->sc_core_lock);
   3666 	wm_release_manageability(sc);
   3667 	wm_release_hw_control(sc);
   3668 	wm_enable_wakeup(sc);
   3669 	mutex_exit(sc->sc_core_lock);
   3670 
   3671 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3672 
   3673 	ether_ifdetach(ifp);
   3674 	if_detach(ifp);
   3675 	if_percpuq_destroy(sc->sc_ipq);
   3676 
   3677 	/* Delete all remaining media. */
   3678 	ifmedia_fini(&sc->sc_mii.mii_media);
   3679 
   3680 	/* Unload RX dmamaps and free mbufs */
   3681 	for (i = 0; i < sc->sc_nqueues; i++) {
   3682 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3683 		mutex_enter(rxq->rxq_lock);
   3684 		wm_rxdrain(rxq);
   3685 		mutex_exit(rxq->rxq_lock);
   3686 	}
   3687 	/* Must unlock here */
   3688 
   3689 	/* Disestablish the interrupt handler */
   3690 	for (i = 0; i < sc->sc_nintrs; i++) {
   3691 		if (sc->sc_ihs[i] != NULL) {
   3692 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3693 			sc->sc_ihs[i] = NULL;
   3694 		}
   3695 	}
   3696 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3697 
   3698 	/* wm_stop() ensured that the workqueues are stopped. */
   3699 	workqueue_destroy(sc->sc_queue_wq);
   3700 	workqueue_destroy(sc->sc_reset_wq);
   3701 
   3702 	for (i = 0; i < sc->sc_nqueues; i++)
   3703 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3704 
   3705 	wm_free_txrx_queues(sc);
   3706 
   3707 	/* Unmap the registers */
   3708 	if (sc->sc_ss) {
   3709 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3710 		sc->sc_ss = 0;
   3711 	}
   3712 	if (sc->sc_ios) {
   3713 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3714 		sc->sc_ios = 0;
   3715 	}
   3716 	if (sc->sc_flashs) {
   3717 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3718 		sc->sc_flashs = 0;
   3719 	}
   3720 
   3721 	if (sc->sc_core_lock)
   3722 		mutex_obj_free(sc->sc_core_lock);
   3723 	if (sc->sc_ich_phymtx)
   3724 		mutex_obj_free(sc->sc_ich_phymtx);
   3725 	if (sc->sc_ich_nvmmtx)
   3726 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3727 
   3728 	return 0;
   3729 }
   3730 
   3731 static bool
   3732 wm_suspend(device_t self, const pmf_qual_t *qual)
   3733 {
   3734 	struct wm_softc *sc = device_private(self);
   3735 
   3736 	wm_release_manageability(sc);
   3737 	wm_release_hw_control(sc);
   3738 	wm_enable_wakeup(sc);
   3739 
   3740 	return true;
   3741 }
   3742 
   3743 static bool
   3744 wm_resume(device_t self, const pmf_qual_t *qual)
   3745 {
   3746 	struct wm_softc *sc = device_private(self);
   3747 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3748 	pcireg_t reg;
   3749 	char buf[256];
   3750 
   3751 	reg = CSR_READ(sc, WMREG_WUS);
   3752 	if (reg != 0) {
   3753 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3754 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
    3755 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C: write 1 to clear */
   3756 	}
   3757 
   3758 	if (sc->sc_type >= WM_T_PCH2)
   3759 		wm_resume_workarounds_pchlan(sc);
   3760 	IFNET_LOCK(ifp);
   3761 	if ((ifp->if_flags & IFF_UP) == 0) {
   3762 		/* >= PCH_SPT hardware workaround before reset. */
   3763 		if (sc->sc_type >= WM_T_PCH_SPT)
   3764 			wm_flush_desc_rings(sc);
   3765 
   3766 		wm_reset(sc);
   3767 		/* Non-AMT based hardware can now take control from firmware */
   3768 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3769 			wm_get_hw_control(sc);
   3770 		wm_init_manageability(sc);
   3771 	} else {
   3772 		/*
   3773 		 * We called pmf_class_network_register(), so if_init() is
   3774 		 * automatically called when IFF_UP. wm_reset(),
   3775 		 * wm_get_hw_control() and wm_init_manageability() are called
   3776 		 * via wm_init().
   3777 		 */
   3778 	}
   3779 	IFNET_UNLOCK(ifp);
   3780 
   3781 	return true;
   3782 }
   3783 
   3784 /*
   3785  * wm_watchdog:
   3786  *
   3787  *	Watchdog checker.
   3788  */
   3789 static bool
   3790 wm_watchdog(struct ifnet *ifp)
   3791 {
   3792 	int qid;
   3793 	struct wm_softc *sc = ifp->if_softc;
    3794 	uint16_t hang_queue = 0; /* The max number of wm(4) queues is 16 (82576). */
   3795 
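         	/*
         	 * Each queue that is still stuck after the sweep done in
         	 * wm_watchdog_txq() sets its bit in hang_queue; any nonzero
         	 * result schedules a reset below.
         	 */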
   3796 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3797 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3798 
   3799 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3800 	}
   3801 
   3802 #ifdef WM_DEBUG
   3803 	if (sc->sc_trigger_reset) {
   3804 		/* debug operation, no need for atomicity or reliability */
   3805 		sc->sc_trigger_reset = 0;
   3806 		hang_queue++;
   3807 	}
   3808 #endif
   3809 
   3810 	if (hang_queue == 0)
   3811 		return true;
   3812 
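         	/*
         	 * atomic_swap_uint() returns the previous value, so only the
         	 * caller that transitions sc_reset_pending from 0 to 1
         	 * enqueues the reset work; concurrent watchdogs become no-ops.
         	 */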
   3813 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3814 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3815 
   3816 	return false;
   3817 }
   3818 
   3819 /*
   3820  * Perform an interface watchdog reset.
   3821  */
   3822 static void
   3823 wm_handle_reset_work(struct work *work, void *arg)
   3824 {
   3825 	struct wm_softc * const sc = arg;
   3826 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3827 
   3828 	/* Don't want ioctl operations to happen */
   3829 	IFNET_LOCK(ifp);
   3830 
    3831 	/* Reset the interface. */
   3832 	wm_init(ifp);
   3833 
   3834 	IFNET_UNLOCK(ifp);
   3835 
    3836 	/*
    3837 	 * There is still some upper-layer processing that calls
    3838 	 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
    3839 	 */
   3840 	/* Try to get more packets going. */
   3841 	ifp->if_start(ifp);
   3842 
   3843 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3844 }
   3845 
   3846 
   3847 static void
   3848 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3849 {
   3850 
   3851 	mutex_enter(txq->txq_lock);
   3852 	if (txq->txq_sending &&
   3853 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3854 		wm_watchdog_txq_locked(ifp, txq, hang);
   3855 
   3856 	mutex_exit(txq->txq_lock);
   3857 }
   3858 
   3859 static void
   3860 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3861     uint16_t *hang)
   3862 {
   3863 	struct wm_softc *sc = ifp->if_softc;
   3864 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3865 
   3866 	KASSERT(mutex_owned(txq->txq_lock));
   3867 
   3868 	/*
   3869 	 * Since we're using delayed interrupts, sweep up
   3870 	 * before we report an error.
   3871 	 */
   3872 	wm_txeof(txq, UINT_MAX);
   3873 
   3874 	if (txq->txq_sending)
   3875 		*hang |= __BIT(wmq->wmq_id);
   3876 
   3877 	if (txq->txq_free == WM_NTXDESC(txq)) {
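         		/*
         		 * All descriptors are free, so the queue has actually
         		 * drained; the timeout means the Tx completion interrupt
         		 * was lost rather than the hardware being stuck.
         		 */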
   3878 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3879 		    device_xname(sc->sc_dev));
   3880 	} else {
   3881 #ifdef WM_DEBUG
   3882 		int i, j;
   3883 		struct wm_txsoft *txs;
   3884 #endif
   3885 		log(LOG_ERR,
   3886 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3887 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3888 		    txq->txq_next);
   3889 		if_statinc(ifp, if_oerrors);
   3890 #ifdef WM_DEBUG
   3891 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3892 		     i = WM_NEXTTXS(txq, i)) {
   3893 			txs = &txq->txq_soft[i];
   3894 			printf("txs %d tx %d -> %d\n",
   3895 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3896 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3897 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3898 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3899 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3900 					printf("\t %#08x%08x\n",
   3901 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3902 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3903 				} else {
   3904 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3905 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3906 					    txq->txq_descs[j].wtx_addr.wa_low);
   3907 					printf("\t %#04x%02x%02x%08x\n",
   3908 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3909 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3910 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3911 					    txq->txq_descs[j].wtx_cmdlen);
   3912 				}
   3913 				if (j == txs->txs_lastdesc)
   3914 					break;
   3915 			}
   3916 		}
   3917 #endif
   3918 	}
   3919 }
   3920 
   3921 /*
   3922  * wm_tick:
   3923  *
   3924  *	One second timer, used to check link status, sweep up
   3925  *	completed transmit jobs, etc.
   3926  */
   3927 static void
   3928 wm_tick(void *arg)
   3929 {
   3930 	struct wm_softc *sc = arg;
   3931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3932 
   3933 	mutex_enter(sc->sc_core_lock);
   3934 
   3935 	if (sc->sc_core_stopping) {
   3936 		mutex_exit(sc->sc_core_lock);
   3937 		return;
   3938 	}
   3939 
   3940 	wm_update_stats(sc);
   3941 
   3942 	if (sc->sc_flags & WM_F_HAS_MII) {
   3943 		bool dotick = true;
   3944 
   3945 		/*
   3946 		 * Workaround for some chips to delay sending LINK_STATE_UP.
   3947 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
   3948 		 */
   3949 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   3950 			struct timeval now;
   3951 
   3952 			getmicrotime(&now);
   3953 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   3954 				dotick = false;
   3955 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   3956 				/* Simplify by checking tv_sec only. */
   3957 
   3958 				sc->sc_linkup_delay_time.tv_sec = 0;
   3959 				sc->sc_linkup_delay_time.tv_usec = 0;
   3960 			}
   3961 		}
   3962 		if (dotick)
   3963 			mii_tick(&sc->sc_mii);
   3964 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3965 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3966 		wm_serdes_tick(sc);
   3967 	else
   3968 		wm_tbi_tick(sc);
   3969 
   3970 	mutex_exit(sc->sc_core_lock);
   3971 
   3972 	if (wm_watchdog(ifp))
   3973 		callout_schedule(&sc->sc_tick_ch, hz);
   3974 }
   3975 
   3976 static int
   3977 wm_ifflags_cb(struct ethercom *ec)
   3978 {
   3979 	struct ifnet *ifp = &ec->ec_if;
   3980 	struct wm_softc *sc = ifp->if_softc;
   3981 	u_short iffchange;
   3982 	int ecchange;
   3983 	bool needreset = false;
   3984 	int rc = 0;
   3985 
   3986 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3987 		device_xname(sc->sc_dev), __func__));
   3988 
   3989 	KASSERT(IFNET_LOCKED(ifp));
   3990 
   3991 	mutex_enter(sc->sc_core_lock);
   3992 
    3993 	/*
    3994 	 * Check for changes in if_flags.
    3995 	 * The main use is to prevent link-down when opening bpf.
    3996 	 */
   3997 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3998 	sc->sc_if_flags = ifp->if_flags;
   3999 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   4000 		needreset = true;
   4001 		goto ec;
   4002 	}
   4003 
    4004 	/* if_flags related updates */
   4005 	if ((iffchange & IFF_PROMISC) != 0)
   4006 		wm_set_filter(sc);
   4007 
   4008 	wm_set_vlan(sc);
   4009 
   4010 ec:
   4011 	/* Check for ec_capenable. */
   4012 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   4013 	sc->sc_ec_capenable = ec->ec_capenable;
   4014 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   4015 		needreset = true;
   4016 		goto out;
   4017 	}
   4018 
   4019 	/* ec related updates */
   4020 	wm_set_eee(sc);
   4021 
   4022 out:
   4023 	if (needreset)
   4024 		rc = ENETRESET;
   4025 	mutex_exit(sc->sc_core_lock);
   4026 
   4027 	return rc;
   4028 }
   4029 
   4030 static bool
   4031 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4032 {
   4033 
   4034 	switch (sc->sc_phytype) {
   4035 	case WMPHY_82577: /* ihphy */
   4036 	case WMPHY_82578: /* atphy */
   4037 	case WMPHY_82579: /* ihphy */
   4038 	case WMPHY_I217: /* ihphy */
   4039 	case WMPHY_82580: /* ihphy */
   4040 	case WMPHY_I350: /* ihphy */
   4041 		return true;
   4042 	default:
   4043 		return false;
   4044 	}
   4045 }
   4046 
   4047 static void
   4048 wm_set_linkdown_discard(struct wm_softc *sc)
   4049 {
   4050 
   4051 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4052 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4053 
   4054 		mutex_enter(txq->txq_lock);
   4055 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4056 		mutex_exit(txq->txq_lock);
   4057 	}
   4058 }
   4059 
   4060 static void
   4061 wm_clear_linkdown_discard(struct wm_softc *sc)
   4062 {
   4063 
   4064 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4065 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4066 
   4067 		mutex_enter(txq->txq_lock);
   4068 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4069 		mutex_exit(txq->txq_lock);
   4070 	}
   4071 }
   4072 
   4073 /*
   4074  * wm_ioctl:		[ifnet interface function]
   4075  *
   4076  *	Handle control requests from the operator.
   4077  */
   4078 static int
   4079 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4080 {
   4081 	struct wm_softc *sc = ifp->if_softc;
   4082 	struct ifreq *ifr = (struct ifreq *)data;
   4083 	struct ifaddr *ifa = (struct ifaddr *)data;
   4084 	struct sockaddr_dl *sdl;
   4085 	int error;
   4086 
   4087 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4088 		device_xname(sc->sc_dev), __func__));
   4089 
   4090 	switch (cmd) {
   4091 	case SIOCADDMULTI:
   4092 	case SIOCDELMULTI:
   4093 		break;
   4094 	default:
   4095 		KASSERT(IFNET_LOCKED(ifp));
   4096 	}
   4097 
   4098 	if (cmd == SIOCZIFDATA) {
   4099 		/*
   4100 		 * Special handling for SIOCZIFDATA.
   4101 		 * Copying and clearing the if_data structure is done with
   4102 		 * ether_ioctl() below.
   4103 		 */
   4104 		mutex_enter(sc->sc_core_lock);
   4105 		wm_update_stats(sc);
   4106 		wm_clear_evcnt(sc);
   4107 		mutex_exit(sc->sc_core_lock);
   4108 	}
   4109 
   4110 	switch (cmd) {
   4111 	case SIOCSIFMEDIA:
   4112 		mutex_enter(sc->sc_core_lock);
   4113 		/* Flow control requires full-duplex mode. */
   4114 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4115 		    (ifr->ifr_media & IFM_FDX) == 0)
   4116 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4117 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4118 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4119 				/* We can do both TXPAUSE and RXPAUSE. */
   4120 				ifr->ifr_media |=
   4121 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4122 			}
   4123 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4124 		}
   4125 		mutex_exit(sc->sc_core_lock);
   4126 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4127 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4128 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4129 				DPRINTF(sc, WM_DEBUG_LINK,
   4130 				    ("%s: %s: Set linkdown discard flag\n",
   4131 					device_xname(sc->sc_dev), __func__));
   4132 				wm_set_linkdown_discard(sc);
   4133 			}
   4134 		}
   4135 		break;
   4136 	case SIOCINITIFADDR:
   4137 		mutex_enter(sc->sc_core_lock);
   4138 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4139 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4140 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4141 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4142 			/* Unicast address is the first multicast entry */
   4143 			wm_set_filter(sc);
   4144 			error = 0;
   4145 			mutex_exit(sc->sc_core_lock);
   4146 			break;
   4147 		}
   4148 		mutex_exit(sc->sc_core_lock);
   4149 		/*FALLTHROUGH*/
   4150 	default:
   4151 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4152 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4153 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4154 				DPRINTF(sc, WM_DEBUG_LINK,
   4155 				    ("%s: %s: Set linkdown discard flag\n",
   4156 					device_xname(sc->sc_dev), __func__));
   4157 				wm_set_linkdown_discard(sc);
   4158 			}
   4159 		}
   4160 		const int s = splnet();
   4161 		/* It may call wm_start, so unlock here */
   4162 		error = ether_ioctl(ifp, cmd, data);
   4163 		splx(s);
   4164 		if (error != ENETRESET)
   4165 			break;
   4166 
   4167 		error = 0;
   4168 
   4169 		if (cmd == SIOCSIFCAP)
   4170 			error = if_init(ifp);
   4171 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4172 			mutex_enter(sc->sc_core_lock);
   4173 			if (sc->sc_if_flags & IFF_RUNNING) {
   4174 				/*
   4175 				 * Multicast list has changed; set the
   4176 				 * hardware filter accordingly.
   4177 				 */
   4178 				wm_set_filter(sc);
   4179 			}
   4180 			mutex_exit(sc->sc_core_lock);
   4181 		}
   4182 		break;
   4183 	}
   4184 
   4185 	return error;
   4186 }
   4187 
   4188 /* MAC address related */
   4189 
   4190 /*
    4191  * Get the NVM offset of the MAC address and return it.
    4192  * If an error occurs, offset 0 is used.
   4193  */
   4194 static uint16_t
   4195 wm_check_alt_mac_addr(struct wm_softc *sc)
   4196 {
   4197 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4198 	uint16_t offset = NVM_OFF_MACADDR;
   4199 
   4200 	/* Try to read alternative MAC address pointer */
   4201 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4202 		return 0;
   4203 
    4204 	/* Check whether the pointer is valid. */
   4205 	if ((offset == 0x0000) || (offset == 0xffff))
   4206 		return 0;
   4207 
   4208 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4209 	/*
    4210 	 * Check whether the alternative MAC address is valid.
    4211 	 * Some cards have a non-0xffff pointer but don't actually use
    4212 	 * an alternative MAC address.
    4213 	 *
    4214 	 * Reject addresses that have the multicast (group) bit set.
   4215 	 */
   4216 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4217 		if (((myea[0] & 0xff) & 0x01) == 0)
   4218 			return offset; /* Found */
   4219 
   4220 	/* Not found */
   4221 	return 0;
   4222 }
   4223 
   4224 static int
   4225 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4226 {
   4227 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4228 	uint16_t offset = NVM_OFF_MACADDR;
   4229 	int do_invert = 0;
   4230 
   4231 	switch (sc->sc_type) {
   4232 	case WM_T_82580:
   4233 	case WM_T_I350:
   4234 	case WM_T_I354:
   4235 		/* EEPROM Top Level Partitioning */
   4236 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4237 		break;
   4238 	case WM_T_82571:
   4239 	case WM_T_82575:
   4240 	case WM_T_82576:
   4241 	case WM_T_80003:
   4242 	case WM_T_I210:
   4243 	case WM_T_I211:
   4244 		offset = wm_check_alt_mac_addr(sc);
   4245 		if (offset == 0)
   4246 			if ((sc->sc_funcid & 0x01) == 1)
   4247 				do_invert = 1;
   4248 		break;
   4249 	default:
   4250 		if ((sc->sc_funcid & 0x01) == 1)
   4251 			do_invert = 1;
   4252 		break;
   4253 	}
   4254 
   4255 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4256 		goto bad;
   4257 
   4258 	enaddr[0] = myea[0] & 0xff;
   4259 	enaddr[1] = myea[0] >> 8;
   4260 	enaddr[2] = myea[1] & 0xff;
   4261 	enaddr[3] = myea[1] >> 8;
   4262 	enaddr[4] = myea[2] & 0xff;
   4263 	enaddr[5] = myea[2] >> 8;
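	/*
	 * Illustrative sketch (example values made up): the NVM stores
	 * the address as three little-endian 16-bit words, so myea[] =
	 * { 0x1100, 0x3322, 0x5544 } unpacks to 00:11:22:33:44:55.
	 */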
   4264 
   4265 	/*
   4266 	 * Toggle the LSB of the MAC address on the second port
   4267 	 * of some dual port cards.
   4268 	 */
   4269 	if (do_invert != 0)
   4270 		enaddr[5] ^= 1;
   4271 
   4272 	return 0;
   4273 
   4274 bad:
   4275 	return -1;
   4276 }
   4277 
   4278 /*
   4279  * wm_set_ral:
   4280  *
    4281  *	Set an entry in the receive address list.
   4282  */
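/*
 * Illustrative sketch (example address made up): for the MAC address
 * 00:11:22:33:44:55 the packing below yields
 *
 *	ral_lo = 0x33221100;		bytes 0-3, little-endian
 *	ral_hi = 0x00005544 | RAL_AV;	bytes 4-5 plus Address Valid
 */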
   4283 static void
   4284 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4285 {
   4286 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4287 	uint32_t wlock_mac;
   4288 	int rv;
   4289 
   4290 	if (enaddr != NULL) {
   4291 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4292 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4293 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4294 		ral_hi |= RAL_AV;
   4295 	} else {
   4296 		ral_lo = 0;
   4297 		ral_hi = 0;
   4298 	}
   4299 
   4300 	switch (sc->sc_type) {
   4301 	case WM_T_82542_2_0:
   4302 	case WM_T_82542_2_1:
   4303 	case WM_T_82543:
   4304 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4305 		CSR_WRITE_FLUSH(sc);
   4306 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4307 		CSR_WRITE_FLUSH(sc);
   4308 		break;
   4309 	case WM_T_PCH2:
   4310 	case WM_T_PCH_LPT:
   4311 	case WM_T_PCH_SPT:
   4312 	case WM_T_PCH_CNP:
   4313 	case WM_T_PCH_TGP:
   4314 		if (idx == 0) {
   4315 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4316 			CSR_WRITE_FLUSH(sc);
   4317 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4318 			CSR_WRITE_FLUSH(sc);
   4319 			return;
   4320 		}
   4321 		if (sc->sc_type != WM_T_PCH2) {
   4322 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4323 			    FWSM_WLOCK_MAC);
   4324 			addrl = WMREG_SHRAL(idx - 1);
   4325 			addrh = WMREG_SHRAH(idx - 1);
   4326 		} else {
   4327 			wlock_mac = 0;
   4328 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4329 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4330 		}
   4331 
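		/*
		 * Presumed meaning of the check below: wlock_mac == 0
		 * leaves every SHRA register writable; otherwise only
		 * SHRA[0 .. wlock_mac - 1] may be written.
		 */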
   4332 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4333 			rv = wm_get_swflag_ich8lan(sc);
   4334 			if (rv != 0)
   4335 				return;
   4336 			CSR_WRITE(sc, addrl, ral_lo);
   4337 			CSR_WRITE_FLUSH(sc);
   4338 			CSR_WRITE(sc, addrh, ral_hi);
   4339 			CSR_WRITE_FLUSH(sc);
   4340 			wm_put_swflag_ich8lan(sc);
   4341 		}
   4342 
   4343 		break;
   4344 	default:
   4345 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4346 		CSR_WRITE_FLUSH(sc);
   4347 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4348 		CSR_WRITE_FLUSH(sc);
   4349 		break;
   4350 	}
   4351 }
   4352 
   4353 /*
   4354  * wm_mchash:
   4355  *
    4356  *	Compute the hash of the multicast address for the multicast
    4357  *	filter (4096-bit; 1024-bit on ICH/PCH devices).
   4358  */
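/*
 * Worked example (illustrative): with sc_mchash_type == 0 on a
 * non-ICH/PCH device, the multicast address 01:00:5e:00:00:01 gives
 *
 *	hash = (0x00 >> 4) | ((uint16_t)0x01 << 4) = 0x010
 *
 * which selects MTA register 0 (0x010 >> 5), bit 16 (0x010 & 0x1f).
 */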
   4359 static uint32_t
   4360 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4361 {
   4362 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4363 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4364 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4365 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4366 	uint32_t hash;
   4367 
   4368 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4369 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4370 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4371 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   4372 	    || (sc->sc_type == WM_T_PCH_TGP)) {
   4373 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4374 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4375 		return (hash & 0x3ff);
   4376 	}
   4377 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4378 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4379 
   4380 	return (hash & 0xfff);
   4381 }
   4382 
    4383 /*
    4384  * wm_rar_count:
    4385  *	Return the number of RAL/RAR entries for this device type.
    4386  */
   4387 static int
   4388 wm_rar_count(struct wm_softc *sc)
   4389 {
   4390 	int size;
   4391 
   4392 	switch (sc->sc_type) {
   4393 	case WM_T_ICH8:
    4394 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4395 		break;
   4396 	case WM_T_ICH9:
   4397 	case WM_T_ICH10:
   4398 	case WM_T_PCH:
   4399 		size = WM_RAL_TABSIZE_ICH8;
   4400 		break;
   4401 	case WM_T_PCH2:
   4402 		size = WM_RAL_TABSIZE_PCH2;
   4403 		break;
   4404 	case WM_T_PCH_LPT:
   4405 	case WM_T_PCH_SPT:
   4406 	case WM_T_PCH_CNP:
   4407 	case WM_T_PCH_TGP:
   4408 		size = WM_RAL_TABSIZE_PCH_LPT;
   4409 		break;
   4410 	case WM_T_82575:
   4411 	case WM_T_I210:
   4412 	case WM_T_I211:
   4413 		size = WM_RAL_TABSIZE_82575;
   4414 		break;
   4415 	case WM_T_82576:
   4416 	case WM_T_82580:
   4417 		size = WM_RAL_TABSIZE_82576;
   4418 		break;
   4419 	case WM_T_I350:
   4420 	case WM_T_I354:
   4421 		size = WM_RAL_TABSIZE_I350;
   4422 		break;
   4423 	default:
   4424 		size = WM_RAL_TABSIZE;
   4425 	}
   4426 
   4427 	return size;
   4428 }
   4429 
   4430 /*
   4431  * wm_set_filter:
   4432  *
   4433  *	Set up the receive filter.
   4434  */
   4435 static void
   4436 wm_set_filter(struct wm_softc *sc)
   4437 {
   4438 	struct ethercom *ec = &sc->sc_ethercom;
   4439 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4440 	struct ether_multi *enm;
   4441 	struct ether_multistep step;
   4442 	bus_addr_t mta_reg;
   4443 	uint32_t hash, reg, bit;
   4444 	int i, size, ralmax, rv;
   4445 
   4446 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4447 		device_xname(sc->sc_dev), __func__));
   4448 	KASSERT(mutex_owned(sc->sc_core_lock));
   4449 
   4450 	if (sc->sc_type >= WM_T_82544)
   4451 		mta_reg = WMREG_CORDOVA_MTA;
   4452 	else
   4453 		mta_reg = WMREG_MTA;
   4454 
   4455 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4456 
   4457 	if (sc->sc_if_flags & IFF_BROADCAST)
   4458 		sc->sc_rctl |= RCTL_BAM;
   4459 	if (sc->sc_if_flags & IFF_PROMISC) {
   4460 		sc->sc_rctl |= RCTL_UPE;
   4461 		ETHER_LOCK(ec);
   4462 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4463 		ETHER_UNLOCK(ec);
   4464 		goto allmulti;
   4465 	}
   4466 
   4467 	/*
   4468 	 * Set the station address in the first RAL slot, and
   4469 	 * clear the remaining slots.
   4470 	 */
   4471 	size = wm_rar_count(sc);
   4472 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4473 
   4474 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
   4475 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
   4476 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4477 		switch (i) {
   4478 		case 0:
   4479 			/* We can use all entries */
   4480 			ralmax = size;
   4481 			break;
   4482 		case 1:
   4483 			/* Only RAR[0] */
   4484 			ralmax = 1;
   4485 			break;
   4486 		default:
   4487 			/* Available SHRA + RAR[0] */
   4488 			ralmax = i + 1;
   4489 		}
   4490 	} else
   4491 		ralmax = size;
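	/*
	 * Illustrative reading: if FWSM_WLOCK_MAC reads 3, RAR[0] plus
	 * SHRA[0..2] are usable, so ralmax = 4; entries at or beyond
	 * ralmax are locked and are left untouched by the loop below.
	 */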
   4492 	for (i = 1; i < size; i++) {
   4493 		if (i < ralmax)
   4494 			wm_set_ral(sc, NULL, i);
   4495 	}
   4496 
   4497 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4498 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4499 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4500 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   4501 	    || (sc->sc_type == WM_T_PCH_TGP))
   4502 		size = WM_ICH8_MC_TABSIZE;
   4503 	else
   4504 		size = WM_MC_TABSIZE;
   4505 	/* Clear out the multicast table. */
   4506 	for (i = 0; i < size; i++) {
   4507 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4508 		CSR_WRITE_FLUSH(sc);
   4509 	}
   4510 
   4511 	ETHER_LOCK(ec);
   4512 	ETHER_FIRST_MULTI(step, ec, enm);
   4513 	while (enm != NULL) {
   4514 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4515 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4516 			ETHER_UNLOCK(ec);
   4517 			/*
   4518 			 * We must listen to a range of multicast addresses.
   4519 			 * For now, just accept all multicasts, rather than
   4520 			 * trying to set only those filter bits needed to match
   4521 			 * the range.  (At this time, the only use of address
   4522 			 * ranges is for IP multicast routing, for which the
   4523 			 * range is big enough to require all bits set.)
   4524 			 */
   4525 			goto allmulti;
   4526 		}
   4527 
   4528 		hash = wm_mchash(sc, enm->enm_addrlo);
   4529 
   4530 		reg = (hash >> 5);
   4531 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4532 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4533 		    || (sc->sc_type == WM_T_PCH2)
   4534 		    || (sc->sc_type == WM_T_PCH_LPT)
   4535 		    || (sc->sc_type == WM_T_PCH_SPT)
   4536 		    || (sc->sc_type == WM_T_PCH_CNP)
   4537 		    || (sc->sc_type == WM_T_PCH_TGP))
   4538 			reg &= 0x1f;
   4539 		else
   4540 			reg &= 0x7f;
   4541 		bit = hash & 0x1f;
   4542 
   4543 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4544 		hash |= 1U << bit;
   4545 
   4546 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4547 			/*
    4548 			 * 82544 Errata 9: Certain registers cannot be written
   4549 			 * with particular alignments in PCI-X bus operation
   4550 			 * (FCAH, MTA and VFTA).
   4551 			 */
   4552 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4553 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4554 			CSR_WRITE_FLUSH(sc);
   4555 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4556 			CSR_WRITE_FLUSH(sc);
   4557 		} else {
   4558 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4559 			CSR_WRITE_FLUSH(sc);
   4560 		}
   4561 
   4562 		ETHER_NEXT_MULTI(step, enm);
   4563 	}
   4564 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4565 	ETHER_UNLOCK(ec);
   4566 
   4567 	goto setit;
   4568 
   4569 allmulti:
   4570 	sc->sc_rctl |= RCTL_MPE;
   4571 
   4572 setit:
   4573 	if (sc->sc_type >= WM_T_PCH2) {
   4574 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4575 		    && (ifp->if_mtu > ETHERMTU))
   4576 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4577 		else
   4578 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4579 		if (rv != 0)
   4580 			device_printf(sc->sc_dev,
   4581 			    "Failed to do workaround for jumbo frame.\n");
   4582 	}
   4583 
   4584 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4585 }
   4586 
   4587 /* Reset and init related */
   4588 
   4589 static void
   4590 wm_set_vlan(struct wm_softc *sc)
   4591 {
   4592 
   4593 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4594 		device_xname(sc->sc_dev), __func__));
   4595 
   4596 	/* Deal with VLAN enables. */
   4597 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4598 		sc->sc_ctrl |= CTRL_VME;
   4599 	else
   4600 		sc->sc_ctrl &= ~CTRL_VME;
   4601 
   4602 	/* Write the control registers. */
   4603 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4604 }
   4605 
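/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of 0,
 *	set it to 10ms via GCR on devices without capability version 2,
 *	or to 16ms via the PCIe Device Control 2 register otherwise.
 *	Completion timeout resend is disabled in either case.
 */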
   4606 static void
   4607 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4608 {
   4609 	uint32_t gcr;
   4610 	pcireg_t ctrl2;
   4611 
   4612 	gcr = CSR_READ(sc, WMREG_GCR);
   4613 
    4614 	/* Only take action if the timeout value is at its default of 0 */
   4615 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4616 		goto out;
   4617 
   4618 	if ((gcr & GCR_CAP_VER2) == 0) {
   4619 		gcr |= GCR_CMPL_TMOUT_10MS;
   4620 		goto out;
   4621 	}
   4622 
   4623 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4624 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4625 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4626 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4627 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4628 
   4629 out:
   4630 	/* Disable completion timeout resend */
   4631 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4632 
   4633 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4634 }
   4635 
   4636 void
   4637 wm_get_auto_rd_done(struct wm_softc *sc)
   4638 {
   4639 	int i;
   4640 
    4641 	/* Wait for eeprom to reload */
   4642 	switch (sc->sc_type) {
   4643 	case WM_T_82571:
   4644 	case WM_T_82572:
   4645 	case WM_T_82573:
   4646 	case WM_T_82574:
   4647 	case WM_T_82583:
   4648 	case WM_T_82575:
   4649 	case WM_T_82576:
   4650 	case WM_T_82580:
   4651 	case WM_T_I350:
   4652 	case WM_T_I354:
   4653 	case WM_T_I210:
   4654 	case WM_T_I211:
   4655 	case WM_T_80003:
   4656 	case WM_T_ICH8:
   4657 	case WM_T_ICH9:
   4658 		for (i = 0; i < 10; i++) {
   4659 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4660 				break;
   4661 			delay(1000);
   4662 		}
   4663 		if (i == 10) {
   4664 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4665 			    "complete\n", device_xname(sc->sc_dev));
   4666 		}
   4667 		break;
   4668 	default:
   4669 		break;
   4670 	}
   4671 }
   4672 
   4673 void
   4674 wm_lan_init_done(struct wm_softc *sc)
   4675 {
   4676 	uint32_t reg = 0;
   4677 	int i;
   4678 
   4679 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4680 		device_xname(sc->sc_dev), __func__));
   4681 
   4682 	/* Wait for eeprom to reload */
   4683 	switch (sc->sc_type) {
   4684 	case WM_T_ICH10:
   4685 	case WM_T_PCH:
   4686 	case WM_T_PCH2:
   4687 	case WM_T_PCH_LPT:
   4688 	case WM_T_PCH_SPT:
   4689 	case WM_T_PCH_CNP:
   4690 	case WM_T_PCH_TGP:
   4691 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4692 			reg = CSR_READ(sc, WMREG_STATUS);
   4693 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4694 				break;
   4695 			delay(100);
   4696 		}
   4697 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4698 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4699 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4700 		}
   4701 		break;
   4702 	default:
   4703 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4704 		    __func__);
   4705 		break;
   4706 	}
   4707 
   4708 	reg &= ~STATUS_LAN_INIT_DONE;
   4709 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4710 }
   4711 
   4712 void
   4713 wm_get_cfg_done(struct wm_softc *sc)
   4714 {
   4715 	int mask;
   4716 	uint32_t reg;
   4717 	int i;
   4718 
   4719 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4720 		device_xname(sc->sc_dev), __func__));
   4721 
   4722 	/* Wait for eeprom to reload */
   4723 	switch (sc->sc_type) {
   4724 	case WM_T_82542_2_0:
   4725 	case WM_T_82542_2_1:
   4726 		/* null */
   4727 		break;
   4728 	case WM_T_82543:
   4729 	case WM_T_82544:
   4730 	case WM_T_82540:
   4731 	case WM_T_82545:
   4732 	case WM_T_82545_3:
   4733 	case WM_T_82546:
   4734 	case WM_T_82546_3:
   4735 	case WM_T_82541:
   4736 	case WM_T_82541_2:
   4737 	case WM_T_82547:
   4738 	case WM_T_82547_2:
   4739 	case WM_T_82573:
   4740 	case WM_T_82574:
   4741 	case WM_T_82583:
   4742 		/* generic */
   4743 		delay(10*1000);
   4744 		break;
   4745 	case WM_T_80003:
   4746 	case WM_T_82571:
   4747 	case WM_T_82572:
   4748 	case WM_T_82575:
   4749 	case WM_T_82576:
   4750 	case WM_T_82580:
   4751 	case WM_T_I350:
   4752 	case WM_T_I354:
   4753 	case WM_T_I210:
   4754 	case WM_T_I211:
   4755 		if (sc->sc_type == WM_T_82571) {
    4756 			/* All 82571 functions share the port 0 bit */
   4757 			mask = EEMNGCTL_CFGDONE_0;
   4758 		} else
   4759 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4760 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4761 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4762 				break;
   4763 			delay(1000);
   4764 		}
   4765 		if (i >= WM_PHY_CFG_TIMEOUT)
   4766 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4767 				device_xname(sc->sc_dev), __func__));
   4768 		break;
   4769 	case WM_T_ICH8:
   4770 	case WM_T_ICH9:
   4771 	case WM_T_ICH10:
   4772 	case WM_T_PCH:
   4773 	case WM_T_PCH2:
   4774 	case WM_T_PCH_LPT:
   4775 	case WM_T_PCH_SPT:
   4776 	case WM_T_PCH_CNP:
   4777 	case WM_T_PCH_TGP:
   4778 		delay(10*1000);
   4779 		if (sc->sc_type >= WM_T_ICH10)
   4780 			wm_lan_init_done(sc);
   4781 		else
   4782 			wm_get_auto_rd_done(sc);
   4783 
   4784 		/* Clear PHY Reset Asserted bit */
   4785 		reg = CSR_READ(sc, WMREG_STATUS);
   4786 		if ((reg & STATUS_PHYRA) != 0)
   4787 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4788 		break;
   4789 	default:
   4790 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4791 		    __func__);
   4792 		break;
   4793 	}
   4794 }
   4795 
   4796 int
   4797 wm_phy_post_reset(struct wm_softc *sc)
   4798 {
   4799 	device_t dev = sc->sc_dev;
   4800 	uint16_t reg;
   4801 	int rv = 0;
   4802 
   4803 	/* This function is only for ICH8 and newer. */
   4804 	if (sc->sc_type < WM_T_ICH8)
   4805 		return 0;
   4806 
   4807 	if (wm_phy_resetisblocked(sc)) {
   4808 		/* XXX */
   4809 		device_printf(dev, "PHY is blocked\n");
   4810 		return -1;
   4811 	}
   4812 
   4813 	/* Allow time for h/w to get to quiescent state after reset */
   4814 	delay(10*1000);
   4815 
   4816 	/* Perform any necessary post-reset workarounds */
   4817 	if (sc->sc_type == WM_T_PCH)
   4818 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4819 	else if (sc->sc_type == WM_T_PCH2)
   4820 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4821 	if (rv != 0)
   4822 		return rv;
   4823 
   4824 	/* Clear the host wakeup bit after lcd reset */
   4825 	if (sc->sc_type >= WM_T_PCH) {
   4826 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4827 		reg &= ~BM_WUC_HOST_WU_BIT;
   4828 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4829 	}
   4830 
   4831 	/* Configure the LCD with the extended configuration region in NVM */
   4832 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4833 		return rv;
   4834 
   4835 	/* Configure the LCD with the OEM bits in NVM */
   4836 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4837 
   4838 	if (sc->sc_type == WM_T_PCH2) {
   4839 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4840 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4841 			delay(10 * 1000);
   4842 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4843 		}
   4844 		/* Set EEE LPI Update Timer to 200usec */
   4845 		rv = sc->phy.acquire(sc);
   4846 		if (rv)
   4847 			return rv;
   4848 		rv = wm_write_emi_reg_locked(dev,
   4849 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4850 		sc->phy.release(sc);
   4851 	}
   4852 
   4853 	return rv;
   4854 }
   4855 
   4856 /* Only for PCH and newer */
   4857 static int
   4858 wm_write_smbus_addr(struct wm_softc *sc)
   4859 {
   4860 	uint32_t strap, freq;
   4861 	uint16_t phy_data;
   4862 	int rv;
   4863 
   4864 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4865 		device_xname(sc->sc_dev), __func__));
   4866 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4867 
   4868 	strap = CSR_READ(sc, WMREG_STRAP);
   4869 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4870 
   4871 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4872 	if (rv != 0)
   4873 		return rv;
   4874 
   4875 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4876 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4877 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4878 
   4879 	if (sc->sc_phytype == WMPHY_I217) {
   4880 		/* Restore SMBus frequency */
    4881 		if (freq--) {
   4882 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4883 			    | HV_SMB_ADDR_FREQ_HIGH);
   4884 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4885 			    HV_SMB_ADDR_FREQ_LOW);
   4886 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4887 			    HV_SMB_ADDR_FREQ_HIGH);
   4888 		} else
   4889 			DPRINTF(sc, WM_DEBUG_INIT,
   4890 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4891 				device_xname(sc->sc_dev), __func__));
   4892 	}
   4893 
   4894 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4895 	    phy_data);
   4896 }
   4897 
   4898 static int
   4899 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4900 {
   4901 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4902 	uint16_t phy_page = 0;
   4903 	int rv = 0;
   4904 
   4905 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4906 		device_xname(sc->sc_dev), __func__));
   4907 
   4908 	switch (sc->sc_type) {
   4909 	case WM_T_ICH8:
   4910 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4911 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4912 			return 0;
   4913 
   4914 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4915 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4916 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4917 			break;
   4918 		}
   4919 		/* FALLTHROUGH */
   4920 	case WM_T_PCH:
   4921 	case WM_T_PCH2:
   4922 	case WM_T_PCH_LPT:
   4923 	case WM_T_PCH_SPT:
   4924 	case WM_T_PCH_CNP:
   4925 	case WM_T_PCH_TGP:
   4926 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4927 		break;
   4928 	default:
   4929 		return 0;
   4930 	}
   4931 
   4932 	if ((rv = sc->phy.acquire(sc)) != 0)
   4933 		return rv;
   4934 
   4935 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4936 	if ((reg & sw_cfg_mask) == 0)
   4937 		goto release;
   4938 
   4939 	/*
   4940 	 * Make sure HW does not configure LCD from PHY extended configuration
   4941 	 * before SW configuration
   4942 	 */
   4943 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4944 	if ((sc->sc_type < WM_T_PCH2)
   4945 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4946 		goto release;
   4947 
   4948 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4949 		device_xname(sc->sc_dev), __func__));
    4950 	/* The pointer is stored in DWORDs; convert it to a word address. */
   4951 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4952 
   4953 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4954 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4955 	if (cnf_size == 0)
   4956 		goto release;
   4957 
   4958 	if (((sc->sc_type == WM_T_PCH)
   4959 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4960 	    || (sc->sc_type > WM_T_PCH)) {
   4961 		/*
   4962 		 * HW configures the SMBus address and LEDs when the OEM and
   4963 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4964 		 * are cleared, SW will configure them instead.
   4965 		 */
   4966 		DPRINTF(sc, WM_DEBUG_INIT,
   4967 		    ("%s: %s: Configure SMBus and LED\n",
   4968 			device_xname(sc->sc_dev), __func__));
   4969 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4970 			goto release;
   4971 
   4972 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4973 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4974 		    (uint16_t)reg);
   4975 		if (rv != 0)
   4976 			goto release;
   4977 	}
   4978 
   4979 	/* Configure LCD from extended configuration region. */
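	/*
	 * Each entry appears to be a (data, address) word pair; an
	 * IGPHY_PAGE_SELECT entry updates the page that is combined with
	 * the register address for the following entries.
	 */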
   4980 	for (i = 0; i < cnf_size; i++) {
   4981 		uint16_t reg_data, reg_addr;
   4982 
   4983 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4984 			goto release;
   4985 
    4986 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4987 			goto release;
   4988 
   4989 		if (reg_addr == IGPHY_PAGE_SELECT)
   4990 			phy_page = reg_data;
   4991 
   4992 		reg_addr &= IGPHY_MAXREGADDR;
   4993 		reg_addr |= phy_page;
   4994 
   4995 		KASSERT(sc->phy.writereg_locked != NULL);
   4996 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4997 		    reg_data);
   4998 	}
   4999 
   5000 release:
   5001 	sc->phy.release(sc);
   5002 	return rv;
   5003 }
   5004 
   5005 /*
   5006  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   5007  *  @sc:       pointer to the HW structure
    5008  *  @d0_state: true if entering D0, false if entering D3
   5009  *
   5010  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   5011  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    5012  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   5013  */
   5014 int
   5015 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   5016 {
   5017 	uint32_t mac_reg;
   5018 	uint16_t oem_reg;
   5019 	int rv;
   5020 
   5021 	if (sc->sc_type < WM_T_PCH)
   5022 		return 0;
   5023 
   5024 	rv = sc->phy.acquire(sc);
   5025 	if (rv != 0)
   5026 		return rv;
   5027 
   5028 	if (sc->sc_type == WM_T_PCH) {
   5029 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5030 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5031 			goto release;
   5032 	}
   5033 
   5034 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5035 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5036 		goto release;
   5037 
   5038 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5039 
   5040 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5041 	if (rv != 0)
   5042 		goto release;
   5043 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5044 
   5045 	if (d0_state) {
   5046 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5047 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5048 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5049 			oem_reg |= HV_OEM_BITS_LPLU;
   5050 	} else {
   5051 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5052 		    != 0)
   5053 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5054 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5055 		    != 0)
   5056 			oem_reg |= HV_OEM_BITS_LPLU;
   5057 	}
   5058 
   5059 	/* Set Restart auto-neg to activate the bits */
   5060 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5061 	    && (wm_phy_resetisblocked(sc) == false))
   5062 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5063 
   5064 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5065 
   5066 release:
   5067 	sc->phy.release(sc);
   5068 
   5069 	return rv;
   5070 }
   5071 
   5072 /* Init hardware bits */
   5073 void
   5074 wm_initialize_hardware_bits(struct wm_softc *sc)
   5075 {
   5076 	uint32_t tarc0, tarc1, reg;
   5077 
   5078 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5079 		device_xname(sc->sc_dev), __func__));
   5080 
   5081 	/* For 82571 variant, 80003 and ICHs */
   5082 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5083 	    || WM_IS_ICHPCH(sc)) {
   5084 
   5085 		/* Transmit Descriptor Control 0 */
   5086 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5087 		reg |= TXDCTL_COUNT_DESC;
   5088 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5089 
   5090 		/* Transmit Descriptor Control 1 */
   5091 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5092 		reg |= TXDCTL_COUNT_DESC;
   5093 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5094 
   5095 		/* TARC0 */
   5096 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5097 		switch (sc->sc_type) {
   5098 		case WM_T_82571:
   5099 		case WM_T_82572:
   5100 		case WM_T_82573:
   5101 		case WM_T_82574:
   5102 		case WM_T_82583:
   5103 		case WM_T_80003:
   5104 			/* Clear bits 30..27 */
   5105 			tarc0 &= ~__BITS(30, 27);
   5106 			break;
   5107 		default:
   5108 			break;
   5109 		}
   5110 
   5111 		switch (sc->sc_type) {
   5112 		case WM_T_82571:
   5113 		case WM_T_82572:
   5114 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5115 
   5116 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5117 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5118 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5119 			/* 8257[12] Errata No.7 */
    5120 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5121 
   5122 			/* TARC1 bit 28 */
   5123 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5124 				tarc1 &= ~__BIT(28);
   5125 			else
   5126 				tarc1 |= __BIT(28);
   5127 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5128 
   5129 			/*
   5130 			 * 8257[12] Errata No.13
    5131 			 * Disable Dynamic Clock Gating.
   5132 			 */
   5133 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5134 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5135 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5136 			break;
   5137 		case WM_T_82573:
   5138 		case WM_T_82574:
   5139 		case WM_T_82583:
   5140 			if ((sc->sc_type == WM_T_82574)
   5141 			    || (sc->sc_type == WM_T_82583))
   5142 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5143 
   5144 			/* Extended Device Control */
   5145 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5146 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5147 			reg |= __BIT(22);	/* Set bit 22 */
   5148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5149 
   5150 			/* Device Control */
   5151 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5152 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5153 
   5154 			/* PCIe Control Register */
   5155 			/*
   5156 			 * 82573 Errata (unknown).
   5157 			 *
   5158 			 * 82574 Errata 25 and 82583 Errata 12
   5159 			 * "Dropped Rx Packets":
    5160 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   5161 			 */
   5162 			reg = CSR_READ(sc, WMREG_GCR);
   5163 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5164 			CSR_WRITE(sc, WMREG_GCR, reg);
   5165 
   5166 			if ((sc->sc_type == WM_T_82574)
   5167 			    || (sc->sc_type == WM_T_82583)) {
   5168 				/*
   5169 				 * Document says this bit must be set for
   5170 				 * proper operation.
   5171 				 */
   5172 				reg = CSR_READ(sc, WMREG_GCR);
   5173 				reg |= __BIT(22);
   5174 				CSR_WRITE(sc, WMREG_GCR, reg);
   5175 
   5176 				/*
    5177 				 * Apply a workaround for the hardware
    5178 				 * erratum documented in the errata sheets:
    5179 				 * some PCIe completions are error prone or
    5180 				 * unreliable, particularly with ASPM
    5181 				 * enabled. Without the fix, this can cause
    5182 				 * Tx timeouts.
   5183 				 */
   5184 				reg = CSR_READ(sc, WMREG_GCR2);
   5185 				reg |= __BIT(0);
   5186 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5187 			}
   5188 			break;
   5189 		case WM_T_80003:
   5190 			/* TARC0 */
   5191 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5192 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5193 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5194 
   5195 			/* TARC1 bit 28 */
   5196 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5197 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5198 				tarc1 &= ~__BIT(28);
   5199 			else
   5200 				tarc1 |= __BIT(28);
   5201 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5202 			break;
   5203 		case WM_T_ICH8:
   5204 		case WM_T_ICH9:
   5205 		case WM_T_ICH10:
   5206 		case WM_T_PCH:
   5207 		case WM_T_PCH2:
   5208 		case WM_T_PCH_LPT:
   5209 		case WM_T_PCH_SPT:
   5210 		case WM_T_PCH_CNP:
   5211 		case WM_T_PCH_TGP:
   5212 			/* TARC0 */
   5213 			if (sc->sc_type == WM_T_ICH8) {
   5214 				/* Set TARC0 bits 29 and 28 */
   5215 				tarc0 |= __BITS(29, 28);
   5216 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5217 				tarc0 |= __BIT(29);
   5218 				/*
   5219 				 *  Drop bit 28. From Linux.
   5220 				 * See I218/I219 spec update
   5221 				 * "5. Buffer Overrun While the I219 is
   5222 				 * Processing DMA Transactions"
   5223 				 */
   5224 				tarc0 &= ~__BIT(28);
   5225 			}
   5226 			/* Set TARC0 bits 23,24,26,27 */
   5227 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5228 
   5229 			/* CTRL_EXT */
   5230 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5231 			reg |= __BIT(22);	/* Set bit 22 */
   5232 			/*
   5233 			 * Enable PHY low-power state when MAC is at D3
   5234 			 * w/o WoL
   5235 			 */
   5236 			if (sc->sc_type >= WM_T_PCH)
   5237 				reg |= CTRL_EXT_PHYPDEN;
   5238 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5239 
   5240 			/* TARC1 */
   5241 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5242 			/* bit 28 */
   5243 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5244 				tarc1 &= ~__BIT(28);
   5245 			else
   5246 				tarc1 |= __BIT(28);
   5247 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5248 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5249 
   5250 			/* Device Status */
   5251 			if (sc->sc_type == WM_T_ICH8) {
   5252 				reg = CSR_READ(sc, WMREG_STATUS);
   5253 				reg &= ~__BIT(31);
   5254 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5255 
   5256 			}
   5257 
   5258 			/* IOSFPC */
   5259 			if (sc->sc_type == WM_T_PCH_SPT) {
   5260 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5261 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5262 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5263 			}
   5264 			/*
    5265 			 * Work around a descriptor data corruption issue seen
    5266 			 * with NFS v2 UDP traffic by simply disabling the NFS
    5267 			 * filtering capability.
   5268 			 */
   5269 			reg = CSR_READ(sc, WMREG_RFCTL);
   5270 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5271 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5272 			break;
   5273 		default:
   5274 			break;
   5275 		}
   5276 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5277 
   5278 		switch (sc->sc_type) {
   5279 		case WM_T_82571:
   5280 		case WM_T_82572:
   5281 		case WM_T_82573:
   5282 		case WM_T_80003:
   5283 		case WM_T_ICH8:
   5284 			/*
   5285 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5286 			 * others to avoid RSS Hash Value bug.
   5287 			 */
   5288 			reg = CSR_READ(sc, WMREG_RFCTL);
   5289 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5290 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5291 			break;
   5292 		case WM_T_82574:
    5293 			/* Use extended Rx descriptors. */
   5294 			reg = CSR_READ(sc, WMREG_RFCTL);
   5295 			reg |= WMREG_RFCTL_EXSTEN;
   5296 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5297 			break;
   5298 		default:
   5299 			break;
   5300 		}
   5301 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5302 		/*
   5303 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5304 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5305 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5306 		 * Correctly by the Device"
   5307 		 *
   5308 		 * I354(C2000) Errata AVR53:
   5309 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5310 		 * Hang"
   5311 		 */
   5312 		reg = CSR_READ(sc, WMREG_RFCTL);
   5313 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5314 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5315 	}
   5316 }
   5317 
   5318 static uint32_t
   5319 wm_rxpbs_adjust_82580(uint32_t val)
   5320 {
   5321 	uint32_t rv = 0;
   5322 
   5323 	if (val < __arraycount(wm_82580_rxpbs_table))
   5324 		rv = wm_82580_rxpbs_table[val];
   5325 
   5326 	return rv;
   5327 }
   5328 
   5329 /*
   5330  * wm_reset_phy:
   5331  *
   5332  *	generic PHY reset function.
   5333  *	Same as e1000_phy_hw_reset_generic()
   5334  */
   5335 static int
   5336 wm_reset_phy(struct wm_softc *sc)
   5337 {
   5338 	uint32_t reg;
   5339 	int rv;
   5340 
   5341 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5342 		device_xname(sc->sc_dev), __func__));
   5343 	if (wm_phy_resetisblocked(sc))
   5344 		return -1;
   5345 
   5346 	rv = sc->phy.acquire(sc);
   5347 	if (rv) {
   5348 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5349 		    __func__, rv);
   5350 		return rv;
   5351 	}
   5352 
   5353 	reg = CSR_READ(sc, WMREG_CTRL);
   5354 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5355 	CSR_WRITE_FLUSH(sc);
   5356 
   5357 	delay(sc->phy.reset_delay_us);
   5358 
   5359 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5360 	CSR_WRITE_FLUSH(sc);
   5361 
   5362 	delay(150);
   5363 
   5364 	sc->phy.release(sc);
   5365 
   5366 	wm_get_cfg_done(sc);
   5367 	wm_phy_post_reset(sc);
   5368 
   5369 	return 0;
   5370 }
   5371 
   5372 /*
   5373  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5374  *
   5375  * In i219, the descriptor rings must be emptied before resetting the HW
   5376  * or before changing the device state to D3 during runtime (runtime PM).
   5377  *
   5378  * Failure to do this will cause the HW to enter a unit hang state which can
    5379  * only be released by a PCI reset of the device.
   5380  *
   5381  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5382  */
   5383 static void
   5384 wm_flush_desc_rings(struct wm_softc *sc)
   5385 {
   5386 	pcireg_t preg;
   5387 	uint32_t reg;
   5388 	struct wm_txqueue *txq;
   5389 	wiseman_txdesc_t *txd;
   5390 	int nexttx;
   5391 	uint32_t rctl;
   5392 
   5393 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5394 
   5395 	/* First, disable MULR fix in FEXTNVM11 */
   5396 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5397 	reg |= FEXTNVM11_DIS_MULRFIX;
   5398 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5399 
   5400 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5401 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5402 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5403 		return;
   5404 
   5405 	/*
   5406 	 * Remove all descriptors from the tx_ring.
   5407 	 *
   5408 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5409 	 * happens when the HW reads the regs. We assign the ring itself as
    5410 	 * the data of the next descriptor. We don't care about the data
    5411 	 * since we are about to reset the HW anyway.
   5412 	 */
   5413 #ifdef WM_DEBUG
   5414 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5415 #endif
   5416 	reg = CSR_READ(sc, WMREG_TCTL);
   5417 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5418 
   5419 	txq = &sc->sc_queue[0].wmq_txq;
   5420 	nexttx = txq->txq_next;
   5421 	txd = &txq->txq_descs[nexttx];
   5422 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
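	/*
	 * A dummy 512-byte transmit (WTX_CMD_IFCS inserts the FCS; the
	 * length lives in the low bits of wtx_cmdlen). The payload is
	 * never inspected; the point is only to make the HW consume one
	 * descriptor.
	 */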
   5423 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5424 	txd->wtx_fields.wtxu_status = 0;
   5425 	txd->wtx_fields.wtxu_options = 0;
   5426 	txd->wtx_fields.wtxu_vlan = 0;
   5427 
   5428 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5429 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5430 
   5431 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5432 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5433 	CSR_WRITE_FLUSH(sc);
   5434 	delay(250);
   5435 
   5436 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5437 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5438 		return;
   5439 
   5440 	/*
   5441 	 * Mark all descriptors in the RX ring as consumed and disable the
   5442 	 * rx ring.
   5443 	 */
   5444 #ifdef WM_DEBUG
   5445 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5446 #endif
   5447 	rctl = CSR_READ(sc, WMREG_RCTL);
   5448 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5449 	CSR_WRITE_FLUSH(sc);
   5450 	delay(150);
   5451 
   5452 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5453 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5454 	reg &= 0xffffc000;
   5455 	/*
   5456 	 * Update thresholds: prefetch threshold to 31, host threshold
   5457 	 * to 1 and make sure the granularity is "descriptors" and not
   5458 	 * "cache lines"
   5459 	 */
   5460 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5461 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5462 
   5463 	/* Momentarily enable the RX ring for the changes to take effect */
   5464 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5465 	CSR_WRITE_FLUSH(sc);
   5466 	delay(150);
   5467 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5468 }
   5469 
   5470 /*
   5471  * wm_reset:
   5472  *
   5473  *	Reset the i82542 chip.
   5474  */
   5475 static void
   5476 wm_reset(struct wm_softc *sc)
   5477 {
   5478 	int phy_reset = 0;
   5479 	int i, error = 0;
   5480 	uint32_t reg;
   5481 	uint16_t kmreg;
   5482 	int rv;
   5483 
   5484 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5485 		device_xname(sc->sc_dev), __func__));
   5486 	KASSERT(sc->sc_type != 0);
   5487 
   5488 	/*
   5489 	 * Allocate on-chip memory according to the MTU size.
   5490 	 * The Packet Buffer Allocation register must be written
   5491 	 * before the chip is reset.
   5492 	 */
   5493 	switch (sc->sc_type) {
   5494 	case WM_T_82547:
   5495 	case WM_T_82547_2:
   5496 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5497 		    PBA_22K : PBA_30K;
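		/*
		 * The remainder of the 40KB packet buffer, (PBA_40K -
		 * sc_pba), is used as the on-chip Tx FIFO tracked below
		 * for the 82547 Tx FIFO stall workaround.
		 */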
   5498 		for (i = 0; i < sc->sc_nqueues; i++) {
   5499 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5500 			txq->txq_fifo_head = 0;
   5501 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5502 			txq->txq_fifo_size =
   5503 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5504 			txq->txq_fifo_stall = 0;
   5505 		}
   5506 		break;
   5507 	case WM_T_82571:
   5508 	case WM_T_82572:
    5509 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5510 	case WM_T_80003:
   5511 		sc->sc_pba = PBA_32K;
   5512 		break;
   5513 	case WM_T_82573:
   5514 		sc->sc_pba = PBA_12K;
   5515 		break;
   5516 	case WM_T_82574:
   5517 	case WM_T_82583:
   5518 		sc->sc_pba = PBA_20K;
   5519 		break;
   5520 	case WM_T_82576:
   5521 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5522 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5523 		break;
   5524 	case WM_T_82580:
   5525 	case WM_T_I350:
   5526 	case WM_T_I354:
   5527 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5528 		break;
   5529 	case WM_T_I210:
   5530 	case WM_T_I211:
   5531 		sc->sc_pba = PBA_34K;
   5532 		break;
   5533 	case WM_T_ICH8:
   5534 		/* Workaround for a bit corruption issue in FIFO memory */
   5535 		sc->sc_pba = PBA_8K;
   5536 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5537 		break;
   5538 	case WM_T_ICH9:
   5539 	case WM_T_ICH10:
   5540 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5541 		    PBA_14K : PBA_10K;
   5542 		break;
   5543 	case WM_T_PCH:
   5544 	case WM_T_PCH2:	/* XXX 14K? */
   5545 	case WM_T_PCH_LPT:
   5546 	case WM_T_PCH_SPT:
   5547 	case WM_T_PCH_CNP:
   5548 	case WM_T_PCH_TGP:
   5549 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5550 		    PBA_12K : PBA_26K;
   5551 		break;
   5552 	default:
   5553 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5554 		    PBA_40K : PBA_48K;
   5555 		break;
   5556 	}
   5557 	/*
   5558 	 * Only old or non-multiqueue devices have the PBA register
   5559 	 * XXX Need special handling for 82575.
   5560 	 */
   5561 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5562 	    || (sc->sc_type == WM_T_82575))
   5563 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5564 
   5565 	/* Prevent the PCI-E bus from sticking */
   5566 	if (sc->sc_flags & WM_F_PCIE) {
   5567 		int timeout = 800;
   5568 
   5569 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5570 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5571 
   5572 		while (timeout--) {
   5573 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5574 			    == 0)
   5575 				break;
   5576 			delay(100);
   5577 		}
   5578 		if (timeout == 0)
   5579 			device_printf(sc->sc_dev,
   5580 			    "failed to disable bus mastering\n");
   5581 	}
   5582 
   5583 	/* Set the completion timeout for interface */
   5584 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5585 	    || (sc->sc_type == WM_T_82580)
   5586 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5587 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5588 		wm_set_pcie_completion_timeout(sc);
   5589 
   5590 	/* Clear interrupt */
   5591 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5592 	if (wm_is_using_msix(sc)) {
   5593 		if (sc->sc_type != WM_T_82574) {
   5594 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5595 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5596 		} else
   5597 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5598 	}
   5599 
   5600 	/* Stop the transmit and receive processes. */
   5601 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5602 	sc->sc_rctl &= ~RCTL_EN;
   5603 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5604 	CSR_WRITE_FLUSH(sc);
   5605 
   5606 	/* XXX set_tbi_sbp_82543() */
   5607 
   5608 	delay(10*1000);
   5609 
   5610 	/* Must acquire the MDIO ownership before MAC reset */
   5611 	switch (sc->sc_type) {
   5612 	case WM_T_82573:
   5613 	case WM_T_82574:
   5614 	case WM_T_82583:
   5615 		error = wm_get_hw_semaphore_82573(sc);
   5616 		break;
   5617 	default:
   5618 		break;
   5619 	}
   5620 
   5621 	/*
   5622 	 * 82541 Errata 29? & 82547 Errata 28?
   5623 	 * See also the description about PHY_RST bit in CTRL register
   5624 	 * in 8254x_GBe_SDM.pdf.
   5625 	 */
   5626 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5627 		CSR_WRITE(sc, WMREG_CTRL,
   5628 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5629 		CSR_WRITE_FLUSH(sc);
   5630 		delay(5000);
   5631 	}
   5632 
   5633 	switch (sc->sc_type) {
   5634 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5635 	case WM_T_82541:
   5636 	case WM_T_82541_2:
   5637 	case WM_T_82547:
   5638 	case WM_T_82547_2:
   5639 		/*
   5640 		 * On some chipsets, a reset through a memory-mapped write
   5641 		 * cycle can cause the chip to reset before completing the
    5642 		 * write cycle. This causes a major headache that can be avoided
   5643 		 * by issuing the reset via indirect register writes through
   5644 		 * I/O space.
   5645 		 *
   5646 		 * So, if we successfully mapped the I/O BAR at attach time,
   5647 		 * use that. Otherwise, try our luck with a memory-mapped
   5648 		 * reset.
   5649 		 */
   5650 		if (sc->sc_flags & WM_F_IOH_VALID)
   5651 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5652 		else
   5653 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5654 		break;
   5655 	case WM_T_82545_3:
   5656 	case WM_T_82546_3:
   5657 		/* Use the shadow control register on these chips. */
   5658 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5659 		break;
   5660 	case WM_T_80003:
   5661 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5662 		if (sc->phy.acquire(sc) != 0)
   5663 			break;
   5664 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5665 		sc->phy.release(sc);
   5666 		break;
   5667 	case WM_T_ICH8:
   5668 	case WM_T_ICH9:
   5669 	case WM_T_ICH10:
   5670 	case WM_T_PCH:
   5671 	case WM_T_PCH2:
   5672 	case WM_T_PCH_LPT:
   5673 	case WM_T_PCH_SPT:
   5674 	case WM_T_PCH_CNP:
   5675 	case WM_T_PCH_TGP:
   5676 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5677 		if (wm_phy_resetisblocked(sc) == false) {
   5678 			/*
   5679 			 * Gate automatic PHY configuration by hardware on
   5680 			 * non-managed 82579
   5681 			 */
   5682 			if ((sc->sc_type == WM_T_PCH2)
   5683 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5684 				== 0))
   5685 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5686 
   5687 			reg |= CTRL_PHY_RESET;
   5688 			phy_reset = 1;
   5689 		} else
   5690 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5691 		if (sc->phy.acquire(sc) != 0)
   5692 			break;
   5693 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5694 		/* Don't insert a completion barrier during reset */
   5695 		delay(20*1000);
   5696 		/*
   5697 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5698 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5699 		 * only. See also wm_get_swflag_ich8lan().
   5700 		 */
   5701 		mutex_exit(sc->sc_ich_phymtx);
   5702 		break;
   5703 	case WM_T_82580:
   5704 	case WM_T_I350:
   5705 	case WM_T_I354:
   5706 	case WM_T_I210:
   5707 	case WM_T_I211:
   5708 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5709 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5710 			CSR_WRITE_FLUSH(sc);
   5711 		delay(5000);
   5712 		break;
   5713 	case WM_T_82542_2_0:
   5714 	case WM_T_82542_2_1:
   5715 	case WM_T_82543:
   5716 	case WM_T_82540:
   5717 	case WM_T_82545:
   5718 	case WM_T_82546:
   5719 	case WM_T_82571:
   5720 	case WM_T_82572:
   5721 	case WM_T_82573:
   5722 	case WM_T_82574:
   5723 	case WM_T_82575:
   5724 	case WM_T_82576:
   5725 	case WM_T_82583:
   5726 	default:
   5727 		/* Everything else can safely use the documented method. */
   5728 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5729 		break;
   5730 	}
   5731 
   5732 	/* Must release the MDIO ownership after MAC reset */
   5733 	switch (sc->sc_type) {
   5734 	case WM_T_82573:
   5735 	case WM_T_82574:
   5736 	case WM_T_82583:
   5737 		if (error == 0)
   5738 			wm_put_hw_semaphore_82573(sc);
   5739 		break;
   5740 	default:
   5741 		break;
   5742 	}
   5743 
   5744 	/* Set Phy Config Counter to 50msec */
   5745 	if (sc->sc_type == WM_T_PCH2) {
   5746 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5747 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5748 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5749 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5750 	}
   5751 
   5752 	if (phy_reset != 0)
   5753 		wm_get_cfg_done(sc);
   5754 
   5755 	/* Reload EEPROM */
   5756 	switch (sc->sc_type) {
   5757 	case WM_T_82542_2_0:
   5758 	case WM_T_82542_2_1:
   5759 	case WM_T_82543:
   5760 	case WM_T_82544:
   5761 		delay(10);
   5762 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5763 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5764 		CSR_WRITE_FLUSH(sc);
   5765 		delay(2000);
   5766 		break;
   5767 	case WM_T_82540:
   5768 	case WM_T_82545:
   5769 	case WM_T_82545_3:
   5770 	case WM_T_82546:
   5771 	case WM_T_82546_3:
   5772 		delay(5*1000);
   5773 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5774 		break;
   5775 	case WM_T_82541:
   5776 	case WM_T_82541_2:
   5777 	case WM_T_82547:
   5778 	case WM_T_82547_2:
   5779 		delay(20000);
   5780 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5781 		break;
   5782 	case WM_T_82571:
   5783 	case WM_T_82572:
   5784 	case WM_T_82573:
   5785 	case WM_T_82574:
   5786 	case WM_T_82583:
   5787 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5788 			delay(10);
   5789 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5790 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5791 			CSR_WRITE_FLUSH(sc);
   5792 		}
   5793 		/* check EECD_EE_AUTORD */
   5794 		wm_get_auto_rd_done(sc);
   5795 		/*
   5796 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5797 		 * is set.
   5798 		 */
   5799 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5800 		    || (sc->sc_type == WM_T_82583))
   5801 			delay(25*1000);
   5802 		break;
   5803 	case WM_T_82575:
   5804 	case WM_T_82576:
   5805 	case WM_T_82580:
   5806 	case WM_T_I350:
   5807 	case WM_T_I354:
   5808 	case WM_T_I210:
   5809 	case WM_T_I211:
   5810 	case WM_T_80003:
   5811 		/* check EECD_EE_AUTORD */
   5812 		wm_get_auto_rd_done(sc);
   5813 		break;
   5814 	case WM_T_ICH8:
   5815 	case WM_T_ICH9:
   5816 	case WM_T_ICH10:
   5817 	case WM_T_PCH:
   5818 	case WM_T_PCH2:
   5819 	case WM_T_PCH_LPT:
   5820 	case WM_T_PCH_SPT:
   5821 	case WM_T_PCH_CNP:
   5822 	case WM_T_PCH_TGP:
   5823 		break;
   5824 	default:
   5825 		panic("%s: unknown type\n", __func__);
   5826 	}
   5827 
   5828 	/* Check whether EEPROM is present or not */
   5829 	switch (sc->sc_type) {
   5830 	case WM_T_82575:
   5831 	case WM_T_82576:
   5832 	case WM_T_82580:
   5833 	case WM_T_I350:
   5834 	case WM_T_I354:
   5835 	case WM_T_ICH8:
   5836 	case WM_T_ICH9:
   5837 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5838 			/* Not found */
   5839 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5840 			if (sc->sc_type == WM_T_82575)
   5841 				wm_reset_init_script_82575(sc);
   5842 		}
   5843 		break;
   5844 	default:
   5845 		break;
   5846 	}
   5847 
   5848 	if (phy_reset != 0)
   5849 		wm_phy_post_reset(sc);
   5850 
   5851 	if ((sc->sc_type == WM_T_82580)
   5852 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5853 		/* Clear global device reset status bit */
   5854 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5855 	}
   5856 
   5857 	/* Clear any pending interrupt events. */
   5858 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5859 	reg = CSR_READ(sc, WMREG_ICR);
   5860 	if (wm_is_using_msix(sc)) {
   5861 		if (sc->sc_type != WM_T_82574) {
   5862 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5863 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5864 		} else
   5865 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5866 	}
   5867 
   5868 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5869 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5870 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5871 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   5872 	    || (sc->sc_type == WM_T_PCH_TGP)) {
   5873 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5874 		reg |= KABGTXD_BGSQLBIAS;
   5875 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5876 	}
   5877 
   5878 	/* Reload sc_ctrl */
   5879 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5880 
   5881 	wm_set_eee(sc);
   5882 
   5883 	/*
   5884 	 * For PCH, this write will make sure that any noise will be detected
   5885 	 * as a CRC error and be dropped rather than show up as a bad packet
   5886 	 * to the DMA engine
   5887 	 */
   5888 	if (sc->sc_type == WM_T_PCH)
   5889 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5890 
   5891 	if (sc->sc_type >= WM_T_82544)
   5892 		CSR_WRITE(sc, WMREG_WUC, 0);
   5893 
   5894 	if (sc->sc_type < WM_T_82575)
   5895 		wm_disable_aspm(sc); /* Workaround for some chips */
   5896 
   5897 	wm_reset_mdicnfg_82580(sc);
   5898 
   5899 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5900 		wm_pll_workaround_i210(sc);
   5901 
   5902 	if (sc->sc_type == WM_T_80003) {
   5903 		/* Default to TRUE to enable the MDIC W/A */
   5904 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5905 
   5906 		rv = wm_kmrn_readreg(sc,
   5907 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5908 		if (rv == 0) {
   5909 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5910 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5911 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5912 			else
   5913 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5914 		}
   5915 	}
   5916 }
   5917 
   5918 /*
   5919  * wm_add_rxbuf:
   5920  *
    5921  *	Add a receive buffer to the indicated descriptor.
   5922  */
   5923 static int
   5924 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5925 {
   5926 	struct wm_softc *sc = rxq->rxq_sc;
   5927 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5928 	struct mbuf *m;
   5929 	int error;
   5930 
   5931 	KASSERT(mutex_owned(rxq->rxq_lock));
   5932 
   5933 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5934 	if (m == NULL)
   5935 		return ENOBUFS;
   5936 
   5937 	MCLGET(m, M_DONTWAIT);
   5938 	if ((m->m_flags & M_EXT) == 0) {
   5939 		m_freem(m);
   5940 		return ENOBUFS;
   5941 	}
   5942 
   5943 	if (rxs->rxs_mbuf != NULL)
   5944 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5945 
   5946 	rxs->rxs_mbuf = m;
   5947 
   5948 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5949 	/*
   5950 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5951 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5952 	 */
   5953 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5954 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5955 	if (error) {
   5956 		/* XXX XXX XXX */
   5957 		aprint_error_dev(sc->sc_dev,
   5958 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5959 		panic("wm_add_rxbuf");
   5960 	}
   5961 
   5962 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5963 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5964 
   5965 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5966 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5967 			wm_init_rxdesc(rxq, idx);
   5968 	} else
   5969 		wm_init_rxdesc(rxq, idx);
   5970 
   5971 	return 0;
   5972 }
   5973 
   5974 /*
   5975  * wm_rxdrain:
   5976  *
   5977  *	Drain the receive queue.
   5978  */
   5979 static void
   5980 wm_rxdrain(struct wm_rxqueue *rxq)
   5981 {
   5982 	struct wm_softc *sc = rxq->rxq_sc;
   5983 	struct wm_rxsoft *rxs;
   5984 	int i;
   5985 
   5986 	KASSERT(mutex_owned(rxq->rxq_lock));
   5987 
   5988 	for (i = 0; i < WM_NRXDESC; i++) {
   5989 		rxs = &rxq->rxq_soft[i];
   5990 		if (rxs->rxs_mbuf != NULL) {
   5991 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5992 			m_freem(rxs->rxs_mbuf);
   5993 			rxs->rxs_mbuf = NULL;
   5994 		}
   5995 	}
   5996 }
   5997 
   5998 /*
   5999  * Setup registers for RSS.
   6000  *
   6001  * XXX not yet VMDq support
   6002  */
   6003 static void
   6004 wm_init_rss(struct wm_softc *sc)
   6005 {
   6006 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   6007 	int i;
   6008 
   6009 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   6010 
   6011 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   6012 		unsigned int qid, reta_ent;
   6013 
   6014 		qid  = i % sc->sc_nqueues;
   6015 		switch (sc->sc_type) {
   6016 		case WM_T_82574:
   6017 			reta_ent = __SHIFTIN(qid,
   6018 			    RETA_ENT_QINDEX_MASK_82574);
   6019 			break;
   6020 		case WM_T_82575:
   6021 			reta_ent = __SHIFTIN(qid,
   6022 			    RETA_ENT_QINDEX1_MASK_82575);
   6023 			break;
   6024 		default:
   6025 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   6026 			break;
   6027 		}
   6028 
   6029 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   6030 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   6031 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   6032 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6033 	}
   6034 
   6035 	rss_getkey((uint8_t *)rss_key);
   6036 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6037 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6038 
   6039 	if (sc->sc_type == WM_T_82574)
   6040 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6041 	else
   6042 		mrqc = MRQC_ENABLE_RSS_MQ;
   6043 
   6044 	/*
    6045 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   6046 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6047 	 */
   6048 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6049 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6050 #if 0
   6051 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6052 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6053 #endif
   6054 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6055 
   6056 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6057 }
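
/*
 * Illustrative sketch of the redirection-table mapping programmed by
 * wm_init_rss() above: with the default entry format, entry i simply
 * selects queue (i % sc_nqueues), so e.g. four queues yield the
 * repeating pattern 0,1,2,3,0,1,...  The helper below is a hypothetical
 * example only (hence "#if 0"), not part of the driver.
 */
#if 0
static void
wm_example_reta_pattern(unsigned int nqueues)
{
	unsigned int i;

	for (i = 0; i < RETA_NUM_ENTRIES; i++)
		printf("RETA[%u] -> queue %u\n", i, i % nqueues);
}
#endif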
   6058 
   6059 /*
    6060  * Adjust TX and RX queue numbers which the system actually uses.
    6061  *
    6062  * The numbers are affected by the following parameters:
    6063  *     - The number of hardware queues
   6064  *     - The number of MSI-X vectors (= "nvectors" argument)
   6065  *     - ncpu
   6066  */
   6067 static void
   6068 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6069 {
   6070 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6071 
   6072 	if (nvectors < 2) {
   6073 		sc->sc_nqueues = 1;
   6074 		return;
   6075 	}
   6076 
   6077 	switch (sc->sc_type) {
   6078 	case WM_T_82572:
   6079 		hw_ntxqueues = 2;
   6080 		hw_nrxqueues = 2;
   6081 		break;
   6082 	case WM_T_82574:
   6083 		hw_ntxqueues = 2;
   6084 		hw_nrxqueues = 2;
   6085 		break;
   6086 	case WM_T_82575:
   6087 		hw_ntxqueues = 4;
   6088 		hw_nrxqueues = 4;
   6089 		break;
   6090 	case WM_T_82576:
   6091 		hw_ntxqueues = 16;
   6092 		hw_nrxqueues = 16;
   6093 		break;
   6094 	case WM_T_82580:
   6095 	case WM_T_I350:
   6096 	case WM_T_I354:
   6097 		hw_ntxqueues = 8;
   6098 		hw_nrxqueues = 8;
   6099 		break;
   6100 	case WM_T_I210:
   6101 		hw_ntxqueues = 4;
   6102 		hw_nrxqueues = 4;
   6103 		break;
   6104 	case WM_T_I211:
   6105 		hw_ntxqueues = 2;
   6106 		hw_nrxqueues = 2;
   6107 		break;
   6108 		/*
    6109 		 * The following Ethernet controllers do not support MSI-X;
   6110 		 * this driver doesn't let them use multiqueue.
   6111 		 *     - WM_T_80003
   6112 		 *     - WM_T_ICH8
   6113 		 *     - WM_T_ICH9
   6114 		 *     - WM_T_ICH10
   6115 		 *     - WM_T_PCH
   6116 		 *     - WM_T_PCH2
   6117 		 *     - WM_T_PCH_LPT
   6118 		 */
   6119 	default:
   6120 		hw_ntxqueues = 1;
   6121 		hw_nrxqueues = 1;
   6122 		break;
   6123 	}
   6124 
   6125 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6126 
   6127 	/*
    6128 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    6129 	 * limit the number of queues actually used.
   6130 	 */
   6131 	if (nvectors < hw_nqueues + 1)
   6132 		sc->sc_nqueues = nvectors - 1;
   6133 	else
   6134 		sc->sc_nqueues = hw_nqueues;
   6135 
   6136 	/*
    6137 	 * Since more queues than CPUs cannot improve scaling, we limit
    6138 	 * the number of queues actually used.
   6139 	 */
   6140 	if (ncpu < sc->sc_nqueues)
   6141 		sc->sc_nqueues = ncpu;
   6142 }
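
/*
 * Worked example for wm_adjust_qnum() with hypothetical numbers: an
 * 82576 has 16 hardware queue pairs; with nvectors = 5 only four
 * vectors remain for Tx/Rx (one is reserved for the link interrupt),
 * so sc_nqueues becomes 4, and ncpu = 8 does not reduce it further.
 */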
   6143 
   6144 static inline bool
   6145 wm_is_using_msix(struct wm_softc *sc)
   6146 {
   6147 
   6148 	return (sc->sc_nintrs > 1);
   6149 }
   6150 
   6151 static inline bool
   6152 wm_is_using_multiqueue(struct wm_softc *sc)
   6153 {
   6154 
   6155 	return (sc->sc_nqueues > 1);
   6156 }
   6157 
   6158 static int
   6159 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6160 {
   6161 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6162 
   6163 	wmq->wmq_id = qidx;
   6164 	wmq->wmq_intr_idx = intr_idx;
   6165 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6166 	    wm_handle_queue, wmq);
   6167 	if (wmq->wmq_si != NULL)
   6168 		return 0;
   6169 
   6170 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6171 	    wmq->wmq_id);
   6172 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6173 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6174 	return ENOMEM;
   6175 }
   6176 
   6177 /*
    6178  * Both single-interrupt MSI and INTx can use this function.
   6179  */
   6180 static int
   6181 wm_setup_legacy(struct wm_softc *sc)
   6182 {
   6183 	pci_chipset_tag_t pc = sc->sc_pc;
   6184 	const char *intrstr = NULL;
   6185 	char intrbuf[PCI_INTRSTR_LEN];
   6186 	int error;
   6187 
   6188 	error = wm_alloc_txrx_queues(sc);
   6189 	if (error) {
   6190 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6191 		    error);
   6192 		return ENOMEM;
   6193 	}
   6194 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6195 	    sizeof(intrbuf));
   6196 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6197 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6198 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6199 	if (sc->sc_ihs[0] == NULL) {
    6200 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   6201 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6202 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6203 		return ENOMEM;
   6204 	}
   6205 
   6206 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6207 	sc->sc_nintrs = 1;
   6208 
   6209 	return wm_softint_establish_queue(sc, 0, 0);
   6210 }
   6211 
   6212 static int
   6213 wm_setup_msix(struct wm_softc *sc)
   6214 {
   6215 	void *vih;
   6216 	kcpuset_t *affinity;
   6217 	int qidx, error, intr_idx, txrx_established;
   6218 	pci_chipset_tag_t pc = sc->sc_pc;
   6219 	const char *intrstr = NULL;
   6220 	char intrbuf[PCI_INTRSTR_LEN];
   6221 	char intr_xname[INTRDEVNAMEBUF];
   6222 
   6223 	if (sc->sc_nqueues < ncpu) {
   6224 		/*
   6225 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6226 		 * interrupts starts from CPU#1.
   6227 		 */
   6228 		sc->sc_affinity_offset = 1;
   6229 	} else {
   6230 		/*
    6231 		 * In this case, this device uses all CPUs, so for readability
    6232 		 * we match the affinity cpu_index to the MSI-X vector number.
   6233 		 */
   6234 		sc->sc_affinity_offset = 0;
   6235 	}
   6236 
   6237 	error = wm_alloc_txrx_queues(sc);
   6238 	if (error) {
   6239 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6240 		    error);
   6241 		return ENOMEM;
   6242 	}
   6243 
   6244 	kcpuset_create(&affinity, false);
   6245 	intr_idx = 0;
   6246 
   6247 	/*
   6248 	 * TX and RX
   6249 	 */
   6250 	txrx_established = 0;
   6251 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6252 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6253 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6254 
   6255 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6256 		    sizeof(intrbuf));
   6257 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6258 		    PCI_INTR_MPSAFE, true);
   6259 		memset(intr_xname, 0, sizeof(intr_xname));
   6260 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6261 		    device_xname(sc->sc_dev), qidx);
   6262 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6263 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6264 		if (vih == NULL) {
   6265 			aprint_error_dev(sc->sc_dev,
    6266 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   6267 			    intrstr ? " at " : "",
   6268 			    intrstr ? intrstr : "");
   6269 
   6270 			goto fail;
   6271 		}
   6272 		kcpuset_zero(affinity);
   6273 		/* Round-robin affinity */
   6274 		kcpuset_set(affinity, affinity_to);
   6275 		error = interrupt_distribute(vih, affinity, NULL);
   6276 		if (error == 0) {
   6277 			aprint_normal_dev(sc->sc_dev,
   6278 			    "for TX and RX interrupting at %s affinity to %u\n",
   6279 			    intrstr, affinity_to);
   6280 		} else {
   6281 			aprint_normal_dev(sc->sc_dev,
   6282 			    "for TX and RX interrupting at %s\n", intrstr);
   6283 		}
   6284 		sc->sc_ihs[intr_idx] = vih;
   6285 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6286 			goto fail;
   6287 		txrx_established++;
   6288 		intr_idx++;
   6289 	}
   6290 
   6291 	/* LINK */
   6292 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6293 	    sizeof(intrbuf));
   6294 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6295 	memset(intr_xname, 0, sizeof(intr_xname));
   6296 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6297 	    device_xname(sc->sc_dev));
   6298 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6299 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6300 	if (vih == NULL) {
   6301 		aprint_error_dev(sc->sc_dev,
    6302 		    "unable to establish MSI-X (for LINK)%s%s\n",
   6303 		    intrstr ? " at " : "",
   6304 		    intrstr ? intrstr : "");
   6305 
   6306 		goto fail;
   6307 	}
    6308 	/* Keep the default affinity for the LINK interrupt */
   6309 	aprint_normal_dev(sc->sc_dev,
   6310 	    "for LINK interrupting at %s\n", intrstr);
   6311 	sc->sc_ihs[intr_idx] = vih;
   6312 	sc->sc_link_intr_idx = intr_idx;
   6313 
   6314 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6315 	kcpuset_destroy(affinity);
   6316 	return 0;
   6317 
   6318 fail:
   6319 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6320 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    6321 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6322 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6323 	}
   6324 
   6325 	kcpuset_destroy(affinity);
   6326 	return ENOMEM;
   6327 }
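
/*
 * Worked example of the round-robin affinity in wm_setup_msix() with
 * hypothetical numbers: with sc_nqueues = 4 and ncpu = 8,
 * sc_affinity_offset is 1, so affinity_to = (1 + intr_idx) % 8 places
 * TXRX0..TXRX3 on CPU#1..CPU#4 while the LINK interrupt keeps its
 * default affinity.
 */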
   6328 
   6329 static void
   6330 wm_unset_stopping_flags(struct wm_softc *sc)
   6331 {
   6332 	int i;
   6333 
   6334 	KASSERT(mutex_owned(sc->sc_core_lock));
   6335 
   6336 	/* Must unset stopping flags in ascending order. */
   6337 	for (i = 0; i < sc->sc_nqueues; i++) {
   6338 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6339 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6340 
   6341 		mutex_enter(txq->txq_lock);
   6342 		txq->txq_stopping = false;
   6343 		mutex_exit(txq->txq_lock);
   6344 
   6345 		mutex_enter(rxq->rxq_lock);
   6346 		rxq->rxq_stopping = false;
   6347 		mutex_exit(rxq->rxq_lock);
   6348 	}
   6349 
   6350 	sc->sc_core_stopping = false;
   6351 }
   6352 
   6353 static void
   6354 wm_set_stopping_flags(struct wm_softc *sc)
   6355 {
   6356 	int i;
   6357 
   6358 	KASSERT(mutex_owned(sc->sc_core_lock));
   6359 
   6360 	sc->sc_core_stopping = true;
   6361 
   6362 	/* Must set stopping flags in ascending order. */
   6363 	for (i = 0; i < sc->sc_nqueues; i++) {
   6364 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6365 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6366 
   6367 		mutex_enter(rxq->rxq_lock);
   6368 		rxq->rxq_stopping = true;
   6369 		mutex_exit(rxq->rxq_lock);
   6370 
   6371 		mutex_enter(txq->txq_lock);
   6372 		txq->txq_stopping = true;
   6373 		mutex_exit(txq->txq_lock);
   6374 	}
   6375 }
   6376 
   6377 /*
   6378  * Write interrupt interval value to ITR or EITR
   6379  */
   6380 static void
   6381 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6382 {
   6383 
   6384 	if (!wmq->wmq_set_itr)
   6385 		return;
   6386 
   6387 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6388 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6389 
   6390 		/*
    6391 		 * The 82575 doesn't have the CNT_INGR field,
    6392 		 * so overwrite the counter field in software.
   6393 		 */
   6394 		if (sc->sc_type == WM_T_82575)
   6395 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6396 			    EITR_COUNTER_MASK_82575);
   6397 		else
   6398 			eitr |= EITR_CNT_INGR;
   6399 
   6400 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6401 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6402 		/*
    6403 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6404 		 * the multiqueue function with MSI-X.
   6405 		 */
   6406 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6407 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6408 	} else {
   6409 		KASSERT(wmq->wmq_id == 0);
   6410 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6411 	}
   6412 
   6413 	wmq->wmq_set_itr = false;
   6414 }
   6415 
   6416 /*
   6417  * TODO
    6418  * The dynamic ITR calculation below is almost the same as Linux igb's;
    6419  * however, it does not fit wm(4), so AIM is disabled until we find an
    6420  * appropriate ITR calculation.
   6421  */
   6422 /*
    6423  * Calculate the interrupt interval value to be written to the register
    6424  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6425  */
   6426 static void
   6427 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6428 {
   6429 #ifdef NOTYET
   6430 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6431 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6432 	uint32_t avg_size = 0;
   6433 	uint32_t new_itr;
   6434 
   6435 	if (rxq->rxq_packets)
    6436 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   6437 	if (txq->txq_packets)
   6438 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6439 
   6440 	if (avg_size == 0) {
   6441 		new_itr = 450; /* restore default value */
   6442 		goto out;
   6443 	}
   6444 
   6445 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6446 	avg_size += 24;
   6447 
   6448 	/* Don't starve jumbo frames */
   6449 	avg_size = uimin(avg_size, 3000);
   6450 
   6451 	/* Give a little boost to mid-size frames */
   6452 	if ((avg_size > 300) && (avg_size < 1200))
   6453 		new_itr = avg_size / 3;
   6454 	else
   6455 		new_itr = avg_size / 2;
   6456 
   6457 out:
   6458 	/*
    6459 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   6460 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6461 	 */
   6462 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6463 		new_itr *= 4;
   6464 
   6465 	if (new_itr != wmq->wmq_itr) {
   6466 		wmq->wmq_itr = new_itr;
   6467 		wmq->wmq_set_itr = true;
   6468 	} else
   6469 		wmq->wmq_set_itr = false;
   6470 
   6471 	rxq->rxq_packets = 0;
   6472 	rxq->rxq_bytes = 0;
   6473 	txq->txq_packets = 0;
   6474 	txq->txq_bytes = 0;
   6475 #endif
   6476 }
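
/*
 * Worked example of the disabled AIM calculation above, with
 * hypothetical traffic: an average packet of 576 bytes becomes 600
 * after adding 24 bytes for CRC, preamble and gap; since
 * 300 < 600 < 1200, new_itr = 600 / 3 = 200, and on everything except
 * the 82575 it is then multiplied by 4, giving 800.
 */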
   6477 
   6478 static void
   6479 wm_init_sysctls(struct wm_softc *sc)
   6480 {
   6481 	struct sysctllog **log;
   6482 	const struct sysctlnode *rnode, *qnode, *cnode;
   6483 	int i, rv;
   6484 	const char *dvname;
   6485 
   6486 	log = &sc->sc_sysctllog;
   6487 	dvname = device_xname(sc->sc_dev);
   6488 
   6489 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6490 	    0, CTLTYPE_NODE, dvname,
   6491 	    SYSCTL_DESCR("wm information and settings"),
   6492 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6493 	if (rv != 0)
   6494 		goto err;
   6495 
   6496 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6497 	    CTLTYPE_BOOL, "txrx_workqueue",
   6498 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6499 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6500 	if (rv != 0)
   6501 		goto teardown;
   6502 
   6503 	for (i = 0; i < sc->sc_nqueues; i++) {
   6504 		struct wm_queue *wmq = &sc->sc_queue[i];
   6505 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6506 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6507 
   6508 		snprintf(sc->sc_queue[i].sysctlname,
   6509 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6510 
   6511 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6512 		    0, CTLTYPE_NODE,
   6513 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6514 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6515 			break;
   6516 
   6517 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6518 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6519 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6520 		    NULL, 0, &txq->txq_free,
   6521 		    0, CTL_CREATE, CTL_EOL) != 0)
   6522 			break;
   6523 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6524 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6525 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6526 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6527 		    0, CTL_CREATE, CTL_EOL) != 0)
   6528 			break;
   6529 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6530 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6531 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6532 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6533 		    0, CTL_CREATE, CTL_EOL) != 0)
   6534 			break;
   6535 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6536 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6537 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6538 		    NULL, 0, &txq->txq_next,
   6539 		    0, CTL_CREATE, CTL_EOL) != 0)
   6540 			break;
   6541 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6542 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6543 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6544 		    NULL, 0, &txq->txq_sfree,
   6545 		    0, CTL_CREATE, CTL_EOL) != 0)
   6546 			break;
   6547 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6548 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6549 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6550 		    NULL, 0, &txq->txq_snext,
   6551 		    0, CTL_CREATE, CTL_EOL) != 0)
   6552 			break;
   6553 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6554 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6555 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6556 		    NULL, 0, &txq->txq_sdirty,
   6557 		    0, CTL_CREATE, CTL_EOL) != 0)
   6558 			break;
   6559 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6560 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6561 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6562 		    NULL, 0, &txq->txq_flags,
   6563 		    0, CTL_CREATE, CTL_EOL) != 0)
   6564 			break;
   6565 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6566 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6567 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6568 		    NULL, 0, &txq->txq_stopping,
   6569 		    0, CTL_CREATE, CTL_EOL) != 0)
   6570 			break;
   6571 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6572 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6573 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6574 		    NULL, 0, &txq->txq_sending,
   6575 		    0, CTL_CREATE, CTL_EOL) != 0)
   6576 			break;
   6577 
   6578 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6579 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6580 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6581 		    NULL, 0, &rxq->rxq_ptr,
   6582 		    0, CTL_CREATE, CTL_EOL) != 0)
   6583 			break;
   6584 	}
   6585 
   6586 #ifdef WM_DEBUG
   6587 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6588 	    CTLTYPE_INT, "debug_flags",
   6589 	    SYSCTL_DESCR(
   6590 		    "Debug flags:\n"	\
   6591 		    "\t0x01 LINK\n"	\
   6592 		    "\t0x02 TX\n"	\
   6593 		    "\t0x04 RX\n"	\
   6594 		    "\t0x08 GMII\n"	\
   6595 		    "\t0x10 MANAGE\n"	\
   6596 		    "\t0x20 NVM\n"	\
   6597 		    "\t0x40 INIT\n"	\
   6598 		    "\t0x80 LOCK"),
   6599 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6600 	if (rv != 0)
   6601 		goto teardown;
   6602 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6603 	    CTLTYPE_BOOL, "trigger_reset",
   6604 	    SYSCTL_DESCR("Trigger an interface reset"),
   6605 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6606 	if (rv != 0)
   6607 		goto teardown;
   6608 #endif
   6609 
   6610 	return;
   6611 
   6612 teardown:
   6613 	sysctl_teardown(log);
   6614 err:
   6615 	sc->sc_sysctllog = NULL;
   6616 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6617 	    __func__, rv);
   6618 }
   6619 
   6620 static void
   6621 wm_update_stats(struct wm_softc *sc)
   6622 {
   6623 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    6624 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   6625 	    cexterr;
   6626 
   6627 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6628 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6629 	mpc = CSR_READ(sc, WMREG_MPC);
   6630 	colc = CSR_READ(sc, WMREG_COLC);
   6631 	sec = CSR_READ(sc, WMREG_SEC);
   6632 	rlec = CSR_READ(sc, WMREG_RLEC);
   6633 
   6634 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6635 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6636 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6637 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6638 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6639 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6640 
   6641 	if (sc->sc_type >= WM_T_82543) {
   6642 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6643 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6644 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6645 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6646 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6647 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6648 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6649 		} else {
   6650 			cexterr = 0;
   6651 			/* Excessive collision + Link down */
   6652 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6653 			    CSR_READ(sc, WMREG_HTDPMC));
   6654 		}
   6655 
   6656 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6657 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6658 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6659 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6660 			    CSR_READ(sc, WMREG_TSCTFC));
   6661 		else {
   6662 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
   6663 			    CSR_READ(sc, WMREG_CBRDPC));
   6664 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6665 			    CSR_READ(sc, WMREG_CBRMPC));
   6666 		}
   6667 	} else
   6668 		algnerrc = rxerrc = cexterr = 0;
   6669 
   6670 	if (sc->sc_type >= WM_T_82542_2_1) {
   6671 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6672 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6673 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6674 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6675 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6676 	}
   6677 
   6678 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6679 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6680 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6681 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6682 
   6683 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6684 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6685 	}
   6686 
   6687 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6688 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6689 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6690 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6691 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6692 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6693 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6694 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6695 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6696 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6697 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6698 
   6699 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6700 	    CSR_READ(sc, WMREG_GORCL) +
   6701 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6702 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6703 	    CSR_READ(sc, WMREG_GOTCL) +
   6704 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6705 
   6706 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6707 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6708 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6709 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6710 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6711 
   6712 	if (sc->sc_type >= WM_T_82540) {
   6713 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6714 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6715 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6716 	}
   6717 
   6718 	/*
   6719 	 * The TOR(L) register includes:
   6720 	 *  - Error
   6721 	 *  - Flow control
    6722 	 *  - Broadcast rejected (this is noted in the 82574 and newer
   6723 	 *    datasheets. What does "broadcast rejected" mean?)
   6724 	 */
   6725 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6726 	    CSR_READ(sc, WMREG_TORL) +
   6727 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6728 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6729 	    CSR_READ(sc, WMREG_TOTL) +
   6730 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6731 
   6732 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6733 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6734 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6735 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6736 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6737 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6738 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6739 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6740 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6741 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6742 	if (sc->sc_type >= WM_T_82571)
   6743 		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6744 	if (sc->sc_type < WM_T_82575) {
   6745 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6746 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6747 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6748 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6749 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6750 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6751 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6752 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6753 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6754 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6755 	} else if (!WM_IS_ICHPCH(sc)) {
   6756 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6757 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6758 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6759 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6760 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6761 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6762 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6763 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6764 
   6765 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6766 		    CSR_READ(sc, WMREG_HGORCL) +
   6767 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6768 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6769 		    CSR_READ(sc, WMREG_HGOTCL) +
   6770 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6771 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6772 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6773 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6774 	}
   6775 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6776 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6777 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6778 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6779 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6780 			    CSR_READ(sc, WMREG_B2OGPRC));
   6781 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6782 			    CSR_READ(sc, WMREG_O2BSPC));
   6783 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6784 			    CSR_READ(sc, WMREG_B2OSPC));
   6785 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6786 			    CSR_READ(sc, WMREG_O2BGPTC));
   6787 		}
   6788 	}
   6789 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6790 	if_statadd_ref(nsr, if_collisions, colc);
   6791 	if_statadd_ref(nsr, if_ierrors,
   6792 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6793 	/*
   6794 	 * WMREG_RNBC is incremented when there are no available buffers in
    6795 	 * host memory. It is not the number of dropped packets, because the
    6796 	 * Ethernet controller can still receive packets in that case if there
    6797 	 * is space in the PHY's FIFO.
    6798 	 *
    6799 	 * If you want to know the WMREG_RNBC count, use a dedicated EVCNT
    6800 	 * instead of if_iqdrops.
   6801 	 */
   6802 	if_statadd_ref(nsr, if_iqdrops, mpc);
   6803 	IF_STAT_PUTREF(ifp);
   6804 }
   6805 
   6806 void
   6807 wm_clear_evcnt(struct wm_softc *sc)
   6808 {
   6809 #ifdef WM_EVENT_COUNTERS
   6810 	int i;
   6811 
   6812 	/* RX queues */
   6813 	for (i = 0; i < sc->sc_nqueues; i++) {
   6814 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6815 
   6816 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6817 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6818 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6819 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6820 	}
   6821 
   6822 	/* TX queues */
   6823 	for (i = 0; i < sc->sc_nqueues; i++) {
   6824 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6825 		int j;
   6826 
   6827 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6828 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6829 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6830 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6831 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6832 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6833 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6834 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6835 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6836 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6837 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6838 
   6839 		for (j = 0; j < WM_NTXSEGS; j++)
   6840 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6841 
   6842 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6843 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6844 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6845 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6846 		if (sc->sc_type <= WM_T_82544)
   6847 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6848 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6849 	}
   6850 
    6851 	/* Miscellaneous */
   6852 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6853 
   6854 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6855 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6856 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6857 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6858 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6859 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6860 
   6861 	if (sc->sc_type >= WM_T_82543) {
   6862 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6863 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6864 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6865 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6866 		else
   6867 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6868 
   6869 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6870 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6871 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6872 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6873 		else {
   6874 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6875 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6876 		}
   6877 	}
   6878 
   6879 	if (sc->sc_type >= WM_T_82542_2_1) {
   6880 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6881 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6882 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6883 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6884 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6885 	}
   6886 
   6887 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6888 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6889 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6890 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6891 
   6892 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6893 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6894 
   6895 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6896 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6897 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6898 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6899 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6900 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6901 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6902 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6903 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6904 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6905 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6906 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6907 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6908 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6909 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6910 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6911 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6912 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6913 	if (sc->sc_type >= WM_T_82540) {
   6914 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6915 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6916 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6917 	}
   6918 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6919 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6920 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6921 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6922 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6923 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6924 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6925 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6926 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6927 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6928 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6929 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6930 	if (sc->sc_type >= WM_T_82571)
   6931 		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6932 	if (sc->sc_type < WM_T_82575) {
   6933 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6934 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6935 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6936 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6937 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6938 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6939 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6940 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6941 	} else if (!WM_IS_ICHPCH(sc)) {
   6942 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6943 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6944 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6945 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6946 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6947 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6948 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6949 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6950 
   6951 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6952 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6953 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6954 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6955 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6956 	}
   6957 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6958 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6959 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6960 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6961 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6962 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6963 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6964 	}
   6965 #endif
   6966 }
   6967 
   6968 /*
   6969  * wm_init:		[ifnet interface function]
   6970  *
   6971  *	Initialize the interface.
   6972  */
   6973 static int
   6974 wm_init(struct ifnet *ifp)
   6975 {
   6976 	struct wm_softc *sc = ifp->if_softc;
   6977 	int ret;
   6978 
   6979 	KASSERT(IFNET_LOCKED(ifp));
   6980 
   6981 	if (sc->sc_dying)
   6982 		return ENXIO;
   6983 
   6984 	mutex_enter(sc->sc_core_lock);
   6985 	ret = wm_init_locked(ifp);
   6986 	mutex_exit(sc->sc_core_lock);
   6987 
   6988 	return ret;
   6989 }
   6990 
   6991 static int
   6992 wm_init_locked(struct ifnet *ifp)
   6993 {
   6994 	struct wm_softc *sc = ifp->if_softc;
   6995 	struct ethercom *ec = &sc->sc_ethercom;
   6996 	int i, j, trynum, error = 0;
   6997 	uint32_t reg, sfp_mask = 0;
   6998 
   6999 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7000 		device_xname(sc->sc_dev), __func__));
   7001 	KASSERT(IFNET_LOCKED(ifp));
   7002 	KASSERT(mutex_owned(sc->sc_core_lock));
   7003 
   7004 	/*
    7005 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    7006 	 * There is a small but measurable benefit to avoiding the adjustment
    7007 	 * of the descriptor so that the headers are aligned, for normal MTU,
   7008 	 * on such platforms.  One possibility is that the DMA itself is
   7009 	 * slightly more efficient if the front of the entire packet (instead
   7010 	 * of the front of the headers) is aligned.
   7011 	 *
   7012 	 * Note we must always set align_tweak to 0 if we are using
   7013 	 * jumbo frames.
   7014 	 */
   7015 #ifdef __NO_STRICT_ALIGNMENT
   7016 	sc->sc_align_tweak = 0;
   7017 #else
   7018 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   7019 		sc->sc_align_tweak = 0;
   7020 	else
   7021 		sc->sc_align_tweak = 2;
   7022 #endif /* __NO_STRICT_ALIGNMENT */
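
	/*
	 * Example of the tweak (illustrative, not chip-specific): the
	 * 14-byte Ethernet header plus a 2-byte offset puts the IP header
	 * at byte 16, a 4-byte boundary, which is what strict-alignment
	 * platforms want.  With jumbo frames the extra 2 bytes may not fit
	 * in MCLBYTES, so the tweak must stay 0.
	 */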
   7023 
   7024 	/* Cancel any pending I/O. */
   7025 	wm_stop_locked(ifp, false, false);
   7026 
   7027 	/* Update statistics before reset */
   7028 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   7029 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   7030 
   7031 	/* >= PCH_SPT hardware workaround before reset. */
   7032 	if (sc->sc_type >= WM_T_PCH_SPT)
   7033 		wm_flush_desc_rings(sc);
   7034 
   7035 	/* Reset the chip to a known state. */
   7036 	wm_reset(sc);
   7037 
   7038 	/*
    7039 	 * AMT-based hardware can now take control from firmware.
   7040 	 * Do this after reset.
   7041 	 */
   7042 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   7043 		wm_get_hw_control(sc);
   7044 
   7045 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   7046 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   7047 		wm_legacy_irq_quirk_spt(sc);
   7048 
   7049 	/* Init hardware bits */
   7050 	wm_initialize_hardware_bits(sc);
   7051 
   7052 	/* Reset the PHY. */
   7053 	if (sc->sc_flags & WM_F_HAS_MII)
   7054 		wm_gmii_reset(sc);
   7055 
   7056 	if (sc->sc_type >= WM_T_ICH8) {
   7057 		reg = CSR_READ(sc, WMREG_GCR);
   7058 		/*
    7059 		 * ICH8 no-snoop bits have opposite polarity. Set to snoop by
   7060 		 * default after reset.
   7061 		 */
   7062 		if (sc->sc_type == WM_T_ICH8)
   7063 			reg |= GCR_NO_SNOOP_ALL;
   7064 		else
   7065 			reg &= ~GCR_NO_SNOOP_ALL;
   7066 		CSR_WRITE(sc, WMREG_GCR, reg);
   7067 	}
   7068 
   7069 	/* Ungate DMA clock to avoid packet loss */
   7070 	if (sc->sc_type >= WM_T_PCH_TGP) {
   7071 		reg = CSR_READ(sc, WMREG_FFLT_DBG);
   7072 		reg |= (1 << 12);
   7073 		CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
   7074 	}
   7075 
   7076 	if ((sc->sc_type >= WM_T_ICH8)
   7077 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   7078 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   7079 
   7080 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7081 		reg |= CTRL_EXT_RO_DIS;
   7082 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7083 	}
   7084 
   7085 	/* Calculate (E)ITR value */
   7086 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7087 		/*
   7088 		 * For NEWQUEUE's EITR (except for 82575).
    7089 		 * The 82575's EITR should be set to the same throttling value
    7090 		 * as other old controllers' ITR; the interrupt/sec calculation
   7091 		 * is the same, that is, 1,000,000,000 / (N * 256).
   7092 		 *
    7093 		 * The 82574's EITR should be set to the same value as its ITR.
   7094 		 *
   7095 		 * For N interrupts/sec, set this value to:
   7096 		 * 1,000,000 / N in contrast to ITR throttling value.
   7097 		 */
   7098 		sc->sc_itr_init = 450;
   7099 	} else if (sc->sc_type >= WM_T_82543) {
   7100 		/*
   7101 		 * Set up the interrupt throttling register (units of 256ns)
   7102 		 * Note that a footnote in Intel's documentation says this
   7103 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7104 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7105 		 * that that is also true for the 1024ns units of the other
   7106 		 * interrupt-related timer registers -- so, really, we ought
   7107 		 * to divide this value by 4 when the link speed is low.
   7108 		 *
   7109 		 * XXX implement this division at link speed change!
   7110 		 */
   7111 
   7112 		/*
   7113 		 * For N interrupts/sec, set this value to:
   7114 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7115 		 * absolute and packet timer values to this value
   7116 		 * divided by 4 to get "simple timer" behavior.
   7117 		 */
   7118 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   7119 	}
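
	/*
	 * Worked numbers for the defaults above: an ITR value of 1500 in
	 * 256 ns units gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec; an EITR init value of 450 on NEWQUEUE controllers
	 * gives roughly 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */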
   7120 
   7121 	error = wm_init_txrx_queues(sc);
   7122 	if (error)
   7123 		goto out;
   7124 
   7125 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7126 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7127 	    (sc->sc_type >= WM_T_82575))
   7128 		wm_serdes_power_up_link_82575(sc);
   7129 
   7130 	/* Clear out the VLAN table -- we don't use it (yet). */
   7131 	CSR_WRITE(sc, WMREG_VET, 0);
   7132 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7133 		trynum = 10; /* Due to hw errata */
   7134 	else
   7135 		trynum = 1;
   7136 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7137 		for (j = 0; j < trynum; j++)
   7138 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7139 
   7140 	/*
   7141 	 * Set up flow-control parameters.
   7142 	 *
   7143 	 * XXX Values could probably stand some tuning.
   7144 	 */
   7145 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7146 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7147 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   7148 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
   7149 	    && (sc->sc_type != WM_T_PCH_TGP)) {
   7150 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7151 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7152 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7153 	}
   7154 
   7155 	sc->sc_fcrtl = FCRTL_DFLT;
   7156 	if (sc->sc_type < WM_T_82543) {
   7157 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7158 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7159 	} else {
   7160 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7161 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7162 	}
   7163 
   7164 	if (sc->sc_type == WM_T_80003)
   7165 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7166 	else
   7167 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7168 
   7169 	/* Writes the control register. */
   7170 	wm_set_vlan(sc);
   7171 
   7172 	if (sc->sc_flags & WM_F_HAS_MII) {
   7173 		uint16_t kmreg;
   7174 
   7175 		switch (sc->sc_type) {
   7176 		case WM_T_80003:
   7177 		case WM_T_ICH8:
   7178 		case WM_T_ICH9:
   7179 		case WM_T_ICH10:
   7180 		case WM_T_PCH:
   7181 		case WM_T_PCH2:
   7182 		case WM_T_PCH_LPT:
   7183 		case WM_T_PCH_SPT:
   7184 		case WM_T_PCH_CNP:
   7185 		case WM_T_PCH_TGP:
   7186 			/*
   7187 			 * Set the mac to wait the maximum time between each
   7188 			 * iteration and increase the max iterations when
   7189 			 * polling the phy; this fixes erroneous timeouts at
   7190 			 * 10Mbps.
   7191 			 */
   7192 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7193 			    0xFFFF);
   7194 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7195 			    &kmreg);
   7196 			kmreg |= 0x3F;
   7197 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7198 			    kmreg);
   7199 			break;
   7200 		default:
   7201 			break;
   7202 		}
   7203 
   7204 		if (sc->sc_type == WM_T_80003) {
   7205 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7206 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7207 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7208 
   7209 			/* Bypass RX and TX FIFOs */
   7210 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7211 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7212 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7213 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7214 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7215 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7216 		}
   7217 	}
   7218 #if 0
   7219 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7220 #endif
   7221 
   7222 	/* Set up checksum offload parameters. */
   7223 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7224 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7225 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7226 		reg |= RXCSUM_IPOFL;
   7227 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7228 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7229 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7230 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7231 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7232 
   7233 	/* Set registers about MSI-X */
   7234 	if (wm_is_using_msix(sc)) {
   7235 		uint32_t ivar, qintr_idx;
   7236 		struct wm_queue *wmq;
   7237 		unsigned int qid;
   7238 
   7239 		if (sc->sc_type == WM_T_82575) {
   7240 			/* Interrupt control */
   7241 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7242 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7243 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7244 
   7245 			/* TX and RX */
   7246 			for (i = 0; i < sc->sc_nqueues; i++) {
   7247 				wmq = &sc->sc_queue[i];
   7248 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7249 				    EITR_TX_QUEUE(wmq->wmq_id)
   7250 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7251 			}
   7252 			/* Link status */
   7253 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7254 			    EITR_OTHER);
   7255 		} else if (sc->sc_type == WM_T_82574) {
   7256 			/* Interrupt control */
   7257 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7258 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7259 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7260 
   7261 			/*
   7262 			 * Work around issue with spurious interrupts
   7263 			 * in MSI-X mode.
    7264 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    7265 			 * been initialized, so re-initialize WMREG_RFCTL here.
   7266 			 */
   7267 			reg = CSR_READ(sc, WMREG_RFCTL);
   7268 			reg |= WMREG_RFCTL_ACKDIS;
   7269 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7270 
   7271 			ivar = 0;
   7272 			/* TX and RX */
   7273 			for (i = 0; i < sc->sc_nqueues; i++) {
   7274 				wmq = &sc->sc_queue[i];
   7275 				qid = wmq->wmq_id;
   7276 				qintr_idx = wmq->wmq_intr_idx;
   7277 
   7278 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7279 				    IVAR_TX_MASK_Q_82574(qid));
   7280 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7281 				    IVAR_RX_MASK_Q_82574(qid));
   7282 			}
   7283 			/* Link status */
   7284 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7285 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7286 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7287 		} else {
   7288 			/* Interrupt control */
   7289 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7290 			    | GPIE_EIAME | GPIE_PBA);
   7291 
   7292 			switch (sc->sc_type) {
   7293 			case WM_T_82580:
   7294 			case WM_T_I350:
   7295 			case WM_T_I354:
   7296 			case WM_T_I210:
   7297 			case WM_T_I211:
   7298 				/* TX and RX */
   7299 				for (i = 0; i < sc->sc_nqueues; i++) {
   7300 					wmq = &sc->sc_queue[i];
   7301 					qid = wmq->wmq_id;
   7302 					qintr_idx = wmq->wmq_intr_idx;
   7303 
   7304 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7305 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7306 					ivar |= __SHIFTIN((qintr_idx
   7307 						| IVAR_VALID),
   7308 					    IVAR_TX_MASK_Q(qid));
   7309 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7310 					ivar |= __SHIFTIN((qintr_idx
   7311 						| IVAR_VALID),
   7312 					    IVAR_RX_MASK_Q(qid));
   7313 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7314 				}
   7315 				break;
   7316 			case WM_T_82576:
   7317 				/* TX and RX */
   7318 				for (i = 0; i < sc->sc_nqueues; i++) {
   7319 					wmq = &sc->sc_queue[i];
   7320 					qid = wmq->wmq_id;
   7321 					qintr_idx = wmq->wmq_intr_idx;
   7322 
   7323 					ivar = CSR_READ(sc,
   7324 					    WMREG_IVAR_Q_82576(qid));
   7325 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7326 					ivar |= __SHIFTIN((qintr_idx
   7327 						| IVAR_VALID),
   7328 					    IVAR_TX_MASK_Q_82576(qid));
   7329 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7330 					ivar |= __SHIFTIN((qintr_idx
   7331 						| IVAR_VALID),
   7332 					    IVAR_RX_MASK_Q_82576(qid));
   7333 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7334 					    ivar);
   7335 				}
   7336 				break;
   7337 			default:
   7338 				break;
   7339 			}
   7340 
   7341 			/* Link status */
   7342 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7343 			    IVAR_MISC_OTHER);
   7344 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7345 		}
   7346 
   7347 		if (wm_is_using_multiqueue(sc)) {
   7348 			wm_init_rss(sc);
   7349 
   7350 			/*
    7351 			 * NOTE: Receive Full-Packet Checksum Offload is
    7352 			 * mutually exclusive with Multiqueue. However,
    7353 			 * this is not the same as the TCP/IP checksums,
    7354 			 * which still work.
    7355 			 */
   7356 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7357 			reg |= RXCSUM_PCSD;
   7358 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7359 		}
   7360 	}
   7361 
   7362 	/* Set up the interrupt registers. */
   7363 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7364 
   7365 	/* Enable SFP module insertion interrupt if it's required */
   7366 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7367 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7368 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7369 		sfp_mask = ICR_GPI(0);
   7370 	}
   7371 
   7372 	if (wm_is_using_msix(sc)) {
   7373 		uint32_t mask;
   7374 		struct wm_queue *wmq;
   7375 
   7376 		switch (sc->sc_type) {
   7377 		case WM_T_82574:
   7378 			mask = 0;
   7379 			for (i = 0; i < sc->sc_nqueues; i++) {
   7380 				wmq = &sc->sc_queue[i];
   7381 				mask |= ICR_TXQ(wmq->wmq_id);
   7382 				mask |= ICR_RXQ(wmq->wmq_id);
   7383 			}
   7384 			mask |= ICR_OTHER;
   7385 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7386 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7387 			break;
   7388 		default:
   7389 			if (sc->sc_type == WM_T_82575) {
   7390 				mask = 0;
   7391 				for (i = 0; i < sc->sc_nqueues; i++) {
   7392 					wmq = &sc->sc_queue[i];
   7393 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7394 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7395 				}
   7396 				mask |= EITR_OTHER;
   7397 			} else {
   7398 				mask = 0;
   7399 				for (i = 0; i < sc->sc_nqueues; i++) {
   7400 					wmq = &sc->sc_queue[i];
   7401 					mask |= 1 << wmq->wmq_intr_idx;
   7402 				}
   7403 				mask |= 1 << sc->sc_link_intr_idx;
   7404 			}
   7405 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7406 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7407 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7408 
   7409 			/* For other interrupts */
   7410 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7411 			break;
   7412 		}
   7413 	} else {
   7414 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7415 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7416 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7417 	}
   7418 
   7419 	/* Set up the inter-packet gap. */
   7420 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7421 
   7422 	if (sc->sc_type >= WM_T_82543) {
   7423 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7424 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7425 			wm_itrs_writereg(sc, wmq);
   7426 		}
   7427 		/*
    7428 		 * Link interrupts occur much less frequently than TX
    7429 		 * and RX interrupts, so we don't tune the
    7430 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    7431 		 * FreeBSD's if_igb does.
   7432 		 */
   7433 	}
   7434 
   7435 	/* Set the VLAN EtherType. */
   7436 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7437 
   7438 	/*
   7439 	 * Set up the transmit control register; we start out with
   7440 	 * a collision distance suitable for FDX, but update it when
   7441 	 * we resolve the media type.
   7442 	 */
   7443 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7444 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7445 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7446 	if (sc->sc_type >= WM_T_82571)
   7447 		sc->sc_tctl |= TCTL_MULR;
   7448 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7449 
   7450 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7451 		/* Write TDT after TCTL.EN is set. See the documentation. */
   7452 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7453 	}
   7454 
   7455 	if (sc->sc_type == WM_T_80003) {
   7456 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7457 		reg &= ~TCTL_EXT_GCEX_MASK;
   7458 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7459 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7460 	}
   7461 
   7462 	/* Set the media. */
   7463 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7464 		goto out;
   7465 
   7466 	/* Configure for OS presence */
   7467 	wm_init_manageability(sc);
   7468 
   7469 	/*
   7470 	 * Set up the receive control register; we actually program the
   7471 	 * register when we set the receive filter. Use multicast address
   7472 	 * offset type 0.
   7473 	 *
   7474 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7475 	 * don't enable that feature.
   7476 	 */
   7477 	sc->sc_mchash_type = 0;
   7478 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7479 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7480 
    7481 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   7482 	if (sc->sc_type == WM_T_82574)
   7483 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7484 
   7485 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7486 		sc->sc_rctl |= RCTL_SECRC;
   7487 
   7488 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7489 	    && (ifp->if_mtu > ETHERMTU)) {
   7490 		sc->sc_rctl |= RCTL_LPE;
   7491 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7492 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7493 	}
   7494 
   7495 	if (MCLBYTES == 2048)
   7496 		sc->sc_rctl |= RCTL_2k;
   7497 	else {
   7498 		if (sc->sc_type >= WM_T_82543) {
   7499 			switch (MCLBYTES) {
   7500 			case 4096:
   7501 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7502 				break;
   7503 			case 8192:
   7504 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7505 				break;
   7506 			case 16384:
   7507 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7508 				break;
   7509 			default:
   7510 				panic("wm_init: MCLBYTES %d unsupported",
   7511 				    MCLBYTES);
   7512 				break;
   7513 			}
   7514 		} else
   7515 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7516 	}
   7517 
   7518 	/* Enable ECC */
   7519 	switch (sc->sc_type) {
   7520 	case WM_T_82571:
   7521 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7522 		reg |= PBA_ECC_CORR_EN;
   7523 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7524 		break;
   7525 	case WM_T_PCH_LPT:
   7526 	case WM_T_PCH_SPT:
   7527 	case WM_T_PCH_CNP:
   7528 	case WM_T_PCH_TGP:
   7529 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7530 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7531 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7532 
   7533 		sc->sc_ctrl |= CTRL_MEHE;
   7534 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7535 		break;
   7536 	default:
   7537 		break;
   7538 	}
   7539 
   7540 	/*
   7541 	 * Set the receive filter.
   7542 	 *
   7543 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7544 	 * the setting of RCTL.EN in wm_set_filter()
   7545 	 */
   7546 	wm_set_filter(sc);
   7547 
    7548 	/* On 82575 and later, set RDT only after RX is enabled */
   7549 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7550 		int qidx;
   7551 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7552 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7553 			for (i = 0; i < WM_NRXDESC; i++) {
    7554 				mutex_enter(rxq->rxq_lock);
    7555 				wm_init_rxdesc(rxq, i);
    7556 				mutex_exit(rxq->rxq_lock);
   7558 			}
   7559 		}
   7560 	}
   7561 
   7562 	wm_unset_stopping_flags(sc);
   7563 
   7564 	/* Start the one second link check clock. */
   7565 	callout_schedule(&sc->sc_tick_ch, hz);
   7566 
   7567 	/*
   7568 	 * ...all done! (IFNET_LOCKED asserted above.)
   7569 	 */
   7570 	ifp->if_flags |= IFF_RUNNING;
   7571 
   7572 out:
   7573 	/* Save last flags for the callback */
   7574 	sc->sc_if_flags = ifp->if_flags;
   7575 	sc->sc_ec_capenable = ec->ec_capenable;
   7576 	if (error)
   7577 		log(LOG_ERR, "%s: interface not running\n",
   7578 		    device_xname(sc->sc_dev));
   7579 	return error;
   7580 }
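
/*
 * Illustrative sketch (not compiled): the mapping from mbuf cluster
 * size to the RCTL buffer-size bits programmed above, collected into
 * one hypothetical helper.  Only the cluster sizes handled above are
 * valid.
 */
#if 0
static uint32_t
wm_rctl_bsize(int mclbytes)
{

	switch (mclbytes) {
	case 2048:
		return RCTL_2k;
	case 4096:
		return RCTL_BSEX | RCTL_BSEX_4k;
	case 8192:
		return RCTL_BSEX | RCTL_BSEX_8k;
	case 16384:
		return RCTL_BSEX | RCTL_BSEX_16k;
	default:
		panic("wm_rctl_bsize: MCLBYTES %d unsupported", mclbytes);
	}
}
#endif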
   7581 
   7582 /*
   7583  * wm_stop:		[ifnet interface function]
   7584  *
   7585  *	Stop transmission on the interface.
   7586  */
   7587 static void
   7588 wm_stop(struct ifnet *ifp, int disable)
   7589 {
   7590 	struct wm_softc *sc = ifp->if_softc;
   7591 
   7592 	ASSERT_SLEEPABLE();
   7593 	KASSERT(IFNET_LOCKED(ifp));
   7594 
   7595 	mutex_enter(sc->sc_core_lock);
    7596 	wm_stop_locked(ifp, disable != 0, true);
   7597 	mutex_exit(sc->sc_core_lock);
   7598 
   7599 	/*
   7600 	 * After wm_set_stopping_flags(), it is guaranteed that
   7601 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7602 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7603 	 * because it can sleep, so call workqueue_wait() here
    7604 	 * instead.
   7605 	 */
   7606 	for (int i = 0; i < sc->sc_nqueues; i++)
   7607 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7608 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7609 }
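
/*
 * A minimal sketch of the stop ordering used above, assuming a
 * hypothetical softc with one workqueue: mark the device as stopping
 * under the lock so nothing new is enqueued, then wait for in-flight
 * work with the lock released, because workqueue_wait() may sleep.
 */
#if 0
static void
example_stop(struct example_softc *esc)
{

	mutex_enter(esc->esc_lock);
	esc->esc_stopping = true;	/* no new workqueue_enqueue() */
	mutex_exit(esc->esc_lock);

	workqueue_wait(esc->esc_wq, &esc->esc_work);	/* may sleep */
}
#endif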
   7610 
   7611 static void
   7612 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7613 {
   7614 	struct wm_softc *sc = ifp->if_softc;
   7615 	struct wm_txsoft *txs;
   7616 	int i, qidx;
   7617 
   7618 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7619 		device_xname(sc->sc_dev), __func__));
   7620 	KASSERT(IFNET_LOCKED(ifp));
   7621 	KASSERT(mutex_owned(sc->sc_core_lock));
   7622 
   7623 	wm_set_stopping_flags(sc);
   7624 
   7625 	if (sc->sc_flags & WM_F_HAS_MII) {
   7626 		/* Down the MII. */
   7627 		mii_down(&sc->sc_mii);
   7628 	} else {
   7629 #if 0
   7630 		/* Should we clear PHY's status properly? */
   7631 		wm_reset(sc);
   7632 #endif
   7633 	}
   7634 
   7635 	/* Stop the transmit and receive processes. */
   7636 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7637 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7638 	sc->sc_rctl &= ~RCTL_EN;
   7639 
   7640 	/*
   7641 	 * Clear the interrupt mask to ensure the device cannot assert its
   7642 	 * interrupt line.
   7643 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7644 	 * service any currently pending or shared interrupt.
   7645 	 */
   7646 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7647 	sc->sc_icr = 0;
   7648 	if (wm_is_using_msix(sc)) {
   7649 		if (sc->sc_type != WM_T_82574) {
   7650 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7651 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7652 		} else
   7653 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7654 	}
   7655 
   7656 	/*
   7657 	 * Stop callouts after interrupts are disabled; if we have
   7658 	 * to wait for them, we will be releasing the CORE_LOCK
   7659 	 * briefly, which will unblock interrupts on the current CPU.
   7660 	 */
   7661 
   7662 	/* Stop the one second clock. */
   7663 	if (wait)
   7664 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7665 	else
   7666 		callout_stop(&sc->sc_tick_ch);
   7667 
   7668 	/* Stop the 82547 Tx FIFO stall check timer. */
   7669 	if (sc->sc_type == WM_T_82547) {
   7670 		if (wait)
   7671 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7672 		else
   7673 			callout_stop(&sc->sc_txfifo_ch);
   7674 	}
   7675 
   7676 	/* Release any queued transmit buffers. */
   7677 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7678 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7679 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7680 		struct mbuf *m;
   7681 
   7682 		mutex_enter(txq->txq_lock);
   7683 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7684 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7685 			txs = &txq->txq_soft[i];
   7686 			if (txs->txs_mbuf != NULL) {
    7687 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7688 				m_freem(txs->txs_mbuf);
   7689 				txs->txs_mbuf = NULL;
   7690 			}
   7691 		}
   7692 		/* Drain txq_interq */
   7693 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7694 			m_freem(m);
   7695 		mutex_exit(txq->txq_lock);
   7696 	}
   7697 
   7698 	/* Mark the interface as down and cancel the watchdog timer. */
   7699 	ifp->if_flags &= ~IFF_RUNNING;
   7700 	sc->sc_if_flags = ifp->if_flags;
   7701 
   7702 	if (disable) {
   7703 		for (i = 0; i < sc->sc_nqueues; i++) {
   7704 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7705 			mutex_enter(rxq->rxq_lock);
   7706 			wm_rxdrain(rxq);
   7707 			mutex_exit(rxq->rxq_lock);
   7708 		}
   7709 	}
   7710 
   7711 #if 0 /* notyet */
   7712 	if (sc->sc_type >= WM_T_82544)
   7713 		CSR_WRITE(sc, WMREG_WUC, 0);
   7714 #endif
   7715 }
   7716 
   7717 static void
   7718 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7719 {
   7720 	struct mbuf *m;
   7721 	int i;
   7722 
   7723 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7724 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7725 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7726 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7727 		    m->m_data, m->m_len, m->m_flags);
   7728 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7729 	    i, i == 1 ? "" : "s");
   7730 }
   7731 
   7732 /*
   7733  * wm_82547_txfifo_stall:
   7734  *
   7735  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7736  *	reset the FIFO pointers, and restart packet transmission.
   7737  */
   7738 static void
   7739 wm_82547_txfifo_stall(void *arg)
   7740 {
   7741 	struct wm_softc *sc = arg;
   7742 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7743 
   7744 	mutex_enter(txq->txq_lock);
   7745 
   7746 	if (txq->txq_stopping)
   7747 		goto out;
   7748 
   7749 	if (txq->txq_fifo_stall) {
   7750 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7751 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7752 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7753 			/*
   7754 			 * Packets have drained.  Stop transmitter, reset
   7755 			 * FIFO pointers, restart transmitter, and kick
   7756 			 * the packet queue.
   7757 			 */
   7758 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7759 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7760 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7761 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7762 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7763 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7764 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7765 			CSR_WRITE_FLUSH(sc);
   7766 
   7767 			txq->txq_fifo_head = 0;
   7768 			txq->txq_fifo_stall = 0;
   7769 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7770 		} else {
   7771 			/*
   7772 			 * Still waiting for packets to drain; try again in
   7773 			 * another tick.
   7774 			 */
   7775 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7776 		}
   7777 	}
   7778 
   7779 out:
   7780 	mutex_exit(txq->txq_lock);
   7781 }
   7782 
   7783 /*
   7784  * wm_82547_txfifo_bugchk:
   7785  *
   7786  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7787  *	prevent enqueueing a packet that would wrap around the end
    7788  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7789  *
   7790  *	We do this by checking the amount of space before the end
   7791  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7792  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7793  *	the internal FIFO pointers to the beginning, and restart
   7794  *	transmission on the interface.
   7795  */
   7796 #define	WM_FIFO_HDR		0x10
   7797 #define	WM_82547_PAD_LEN	0x3e0
   7798 static int
   7799 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7800 {
   7801 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7802 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7803 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7804 
   7805 	/* Just return if already stalled. */
   7806 	if (txq->txq_fifo_stall)
   7807 		return 1;
   7808 
   7809 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7810 		/* Stall only occurs in half-duplex mode. */
   7811 		goto send_packet;
   7812 	}
   7813 
   7814 	if (len >= WM_82547_PAD_LEN + space) {
   7815 		txq->txq_fifo_stall = 1;
   7816 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7817 		return 1;
   7818 	}
   7819 
   7820 send_packet:
   7821 	txq->txq_fifo_head += len;
   7822 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7823 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7824 
   7825 	return 0;
   7826 }
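
/*
 * Worked example for the check above (numbers are illustrative): with
 * txq_fifo_size = 0x4000 and txq_fifo_head = 0x3e00, there are
 * space = 0x200 bytes left before the end of the FIFO.  A 1514-byte
 * frame rounds up to len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) =
 * 0x600, and since 0x600 >= WM_82547_PAD_LEN + 0x200 (= 0x5e0), the
 * queue is stalled until the FIFO drains and its pointers are reset.
 */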
   7827 
   7828 static int
   7829 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7830 {
   7831 	int error;
   7832 
   7833 	/*
   7834 	 * Allocate the control data structures, and create and load the
   7835 	 * DMA map for it.
   7836 	 *
   7837 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7838 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7839 	 * both sets within the same 4G segment.
   7840 	 */
   7841 	if (sc->sc_type < WM_T_82544)
   7842 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7843 	else
   7844 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7845 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7846 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7847 	else
   7848 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7849 
   7850 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7851 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7852 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7853 		aprint_error_dev(sc->sc_dev,
   7854 		    "unable to allocate TX control data, error = %d\n",
   7855 		    error);
   7856 		goto fail_0;
   7857 	}
   7858 
   7859 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7860 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7861 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7862 		aprint_error_dev(sc->sc_dev,
   7863 		    "unable to map TX control data, error = %d\n", error);
   7864 		goto fail_1;
   7865 	}
   7866 
   7867 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7868 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7869 		aprint_error_dev(sc->sc_dev,
   7870 		    "unable to create TX control data DMA map, error = %d\n",
   7871 		    error);
   7872 		goto fail_2;
   7873 	}
   7874 
   7875 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7876 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7877 		aprint_error_dev(sc->sc_dev,
   7878 		    "unable to load TX control data DMA map, error = %d\n",
   7879 		    error);
   7880 		goto fail_3;
   7881 	}
   7882 
   7883 	return 0;
   7884 
   7885 fail_3:
   7886 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7887 fail_2:
   7888 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7889 	    WM_TXDESCS_SIZE(txq));
   7890 fail_1:
   7891 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7892 fail_0:
   7893 	return error;
   7894 }
   7895 
   7896 static void
   7897 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7898 {
   7899 
   7900 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7901 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7902 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7903 	    WM_TXDESCS_SIZE(txq));
   7904 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7905 }
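
/*
 * Note on the bus_dma(9) pairing used by the wm_{alloc,free}_*_descs()
 * functions: each allocation step has a matching teardown, performed in
 * reverse order both on the error path (the fail_* labels) and in the
 * free function:
 *
 *	bus_dmamem_alloc()	<->	bus_dmamem_free()
 *	bus_dmamem_map()	<->	bus_dmamem_unmap()
 *	bus_dmamap_create()	<->	bus_dmamap_destroy()
 *	bus_dmamap_load()	<->	bus_dmamap_unload()
 */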
   7906 
   7907 static int
   7908 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7909 {
   7910 	int error;
   7911 	size_t rxq_descs_size;
   7912 
   7913 	/*
   7914 	 * Allocate the control data structures, and create and load the
   7915 	 * DMA map for it.
   7916 	 *
   7917 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7918 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7919 	 * both sets within the same 4G segment.
   7920 	 */
   7921 	rxq->rxq_ndesc = WM_NRXDESC;
   7922 	if (sc->sc_type == WM_T_82574)
   7923 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7924 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7925 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7926 	else
   7927 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7928 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7929 
   7930 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7931 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7932 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7933 		aprint_error_dev(sc->sc_dev,
   7934 		    "unable to allocate RX control data, error = %d\n",
   7935 		    error);
   7936 		goto fail_0;
   7937 	}
   7938 
   7939 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7940 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7941 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7942 		aprint_error_dev(sc->sc_dev,
   7943 		    "unable to map RX control data, error = %d\n", error);
   7944 		goto fail_1;
   7945 	}
   7946 
   7947 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7948 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7949 		aprint_error_dev(sc->sc_dev,
   7950 		    "unable to create RX control data DMA map, error = %d\n",
   7951 		    error);
   7952 		goto fail_2;
   7953 	}
   7954 
   7955 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7956 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7957 		aprint_error_dev(sc->sc_dev,
   7958 		    "unable to load RX control data DMA map, error = %d\n",
   7959 		    error);
   7960 		goto fail_3;
   7961 	}
   7962 
   7963 	return 0;
   7964 
   7965  fail_3:
   7966 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7967  fail_2:
   7968 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7969 	    rxq_descs_size);
   7970  fail_1:
   7971 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7972  fail_0:
   7973 	return error;
   7974 }
   7975 
   7976 static void
   7977 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7978 {
   7979 
   7980 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7981 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7982 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7983 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7984 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7985 }
   7986 
   7987 
   7988 static int
   7989 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7990 {
   7991 	int i, error;
   7992 
   7993 	/* Create the transmit buffer DMA maps. */
   7994 	WM_TXQUEUELEN(txq) =
   7995 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7996 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7997 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7998 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7999 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   8000 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   8001 			aprint_error_dev(sc->sc_dev,
   8002 			    "unable to create Tx DMA map %d, error = %d\n",
   8003 			    i, error);
   8004 			goto fail;
   8005 		}
   8006 	}
   8007 
   8008 	return 0;
   8009 
   8010 fail:
   8011 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8012 		if (txq->txq_soft[i].txs_dmamap != NULL)
   8013 			bus_dmamap_destroy(sc->sc_dmat,
   8014 			    txq->txq_soft[i].txs_dmamap);
   8015 	}
   8016 	return error;
   8017 }
   8018 
   8019 static void
   8020 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   8021 {
   8022 	int i;
   8023 
   8024 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8025 		if (txq->txq_soft[i].txs_dmamap != NULL)
   8026 			bus_dmamap_destroy(sc->sc_dmat,
   8027 			    txq->txq_soft[i].txs_dmamap);
   8028 	}
   8029 }
   8030 
   8031 static int
   8032 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8033 {
   8034 	int i, error;
   8035 
   8036 	/* Create the receive buffer DMA maps. */
   8037 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8038 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   8039 			    MCLBYTES, 0, 0,
   8040 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   8041 			aprint_error_dev(sc->sc_dev,
   8042 			    "unable to create Rx DMA map %d error = %d\n",
   8043 			    i, error);
   8044 			goto fail;
   8045 		}
   8046 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   8047 	}
   8048 
   8049 	return 0;
   8050 
   8051  fail:
   8052 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8053 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8054 			bus_dmamap_destroy(sc->sc_dmat,
   8055 			    rxq->rxq_soft[i].rxs_dmamap);
   8056 	}
   8057 	return error;
   8058 }
   8059 
   8060 static void
   8061 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8062 {
   8063 	int i;
   8064 
   8065 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8066 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8067 			bus_dmamap_destroy(sc->sc_dmat,
   8068 			    rxq->rxq_soft[i].rxs_dmamap);
   8069 	}
   8070 }
   8071 
   8072 /*
    8073  * wm_alloc_txrx_queues:
   8074  *	Allocate {tx,rx}descs and {tx,rx} buffers
   8075  */
   8076 static int
   8077 wm_alloc_txrx_queues(struct wm_softc *sc)
   8078 {
   8079 	int i, error, tx_done, rx_done;
   8080 
   8081 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   8082 	    KM_SLEEP);
   8083 	if (sc->sc_queue == NULL) {
    8084 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   8085 		error = ENOMEM;
   8086 		goto fail_0;
   8087 	}
   8088 
   8089 	/* For transmission */
   8090 	error = 0;
   8091 	tx_done = 0;
   8092 	for (i = 0; i < sc->sc_nqueues; i++) {
   8093 #ifdef WM_EVENT_COUNTERS
   8094 		int j;
   8095 		const char *xname;
   8096 #endif
   8097 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8098 		txq->txq_sc = sc;
   8099 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8100 
   8101 		error = wm_alloc_tx_descs(sc, txq);
   8102 		if (error)
   8103 			break;
   8104 		error = wm_alloc_tx_buffer(sc, txq);
   8105 		if (error) {
   8106 			wm_free_tx_descs(sc, txq);
   8107 			break;
   8108 		}
   8109 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8110 		if (txq->txq_interq == NULL) {
   8111 			wm_free_tx_descs(sc, txq);
   8112 			wm_free_tx_buffer(sc, txq);
   8113 			error = ENOMEM;
   8114 			break;
   8115 		}
   8116 
   8117 #ifdef WM_EVENT_COUNTERS
   8118 		xname = device_xname(sc->sc_dev);
   8119 
   8120 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8121 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8122 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8123 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8124 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8125 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8126 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8127 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8128 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8129 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8130 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8131 
   8132 		for (j = 0; j < WM_NTXSEGS; j++) {
   8133 			snprintf(txq->txq_txseg_evcnt_names[j],
   8134 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8135 			    "txq%02dtxseg%d", i, j);
   8136 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8137 			    EVCNT_TYPE_MISC,
   8138 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8139 		}
   8140 
   8141 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8142 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8143 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8144 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8145 		/* Only for 82544 (and earlier?) */
   8146 		if (sc->sc_type <= WM_T_82544)
   8147 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8148 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8149 #endif /* WM_EVENT_COUNTERS */
   8150 
   8151 		tx_done++;
   8152 	}
   8153 	if (error)
   8154 		goto fail_1;
   8155 
   8156 	/* For receive */
   8157 	error = 0;
   8158 	rx_done = 0;
   8159 	for (i = 0; i < sc->sc_nqueues; i++) {
   8160 #ifdef WM_EVENT_COUNTERS
   8161 		const char *xname;
   8162 #endif
   8163 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8164 		rxq->rxq_sc = sc;
   8165 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8166 
   8167 		error = wm_alloc_rx_descs(sc, rxq);
   8168 		if (error)
   8169 			break;
   8170 
   8171 		error = wm_alloc_rx_buffer(sc, rxq);
   8172 		if (error) {
   8173 			wm_free_rx_descs(sc, rxq);
   8174 			break;
   8175 		}
   8176 
   8177 #ifdef WM_EVENT_COUNTERS
   8178 		xname = device_xname(sc->sc_dev);
   8179 
   8180 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8181 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8182 
   8183 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8184 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8185 #endif /* WM_EVENT_COUNTERS */
   8186 
   8187 		rx_done++;
   8188 	}
   8189 	if (error)
   8190 		goto fail_2;
   8191 
   8192 	return 0;
   8193 
   8194 fail_2:
   8195 	for (i = 0; i < rx_done; i++) {
   8196 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8197 		wm_free_rx_buffer(sc, rxq);
   8198 		wm_free_rx_descs(sc, rxq);
   8199 		if (rxq->rxq_lock)
   8200 			mutex_obj_free(rxq->rxq_lock);
   8201 	}
   8202 fail_1:
   8203 	for (i = 0; i < tx_done; i++) {
   8204 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8205 		pcq_destroy(txq->txq_interq);
   8206 		wm_free_tx_buffer(sc, txq);
   8207 		wm_free_tx_descs(sc, txq);
   8208 		if (txq->txq_lock)
   8209 			mutex_obj_free(txq->txq_lock);
   8210 	}
   8211 
   8212 	kmem_free(sc->sc_queue,
   8213 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8214 fail_0:
   8215 	return error;
   8216 }
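
/*
 * A minimal sketch of the partial-unwind idiom used above, with a
 * hypothetical item_init()/item_fini() pair: count how many items were
 * fully initialized and, on error, tear down only those, in reverse
 * order.
 */
#if 0
static int
example_init_all(struct item *items, int n)
{
	int i, error = 0;

	for (i = 0; i < n; i++) {
		error = item_init(&items[i]);
		if (error)
			break;
	}
	if (error) {
		while (i-- > 0)
			item_fini(&items[i]);
	}

	return error;
}
#endif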
   8217 
   8218 /*
    8219  * wm_free_txrx_queues:
   8220  *	Free {tx,rx}descs and {tx,rx} buffers
   8221  */
   8222 static void
   8223 wm_free_txrx_queues(struct wm_softc *sc)
   8224 {
   8225 	int i;
   8226 
   8227 	for (i = 0; i < sc->sc_nqueues; i++) {
   8228 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8229 
   8230 #ifdef WM_EVENT_COUNTERS
   8231 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8232 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8233 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8234 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8235 #endif /* WM_EVENT_COUNTERS */
   8236 
   8237 		wm_free_rx_buffer(sc, rxq);
   8238 		wm_free_rx_descs(sc, rxq);
   8239 		if (rxq->rxq_lock)
   8240 			mutex_obj_free(rxq->rxq_lock);
   8241 	}
   8242 
   8243 	for (i = 0; i < sc->sc_nqueues; i++) {
   8244 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8245 		struct mbuf *m;
   8246 #ifdef WM_EVENT_COUNTERS
   8247 		int j;
   8248 
   8249 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8250 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8251 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8252 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8253 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8254 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8255 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8256 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8257 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8258 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8259 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8260 
   8261 		for (j = 0; j < WM_NTXSEGS; j++)
   8262 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8263 
   8264 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8265 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8266 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8267 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8268 		if (sc->sc_type <= WM_T_82544)
   8269 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8270 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8271 #endif /* WM_EVENT_COUNTERS */
   8272 
   8273 		/* Drain txq_interq */
   8274 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8275 			m_freem(m);
   8276 		pcq_destroy(txq->txq_interq);
   8277 
   8278 		wm_free_tx_buffer(sc, txq);
   8279 		wm_free_tx_descs(sc, txq);
   8280 		if (txq->txq_lock)
   8281 			mutex_obj_free(txq->txq_lock);
   8282 	}
   8283 
   8284 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8285 }
   8286 
   8287 static void
   8288 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8289 {
   8290 
   8291 	KASSERT(mutex_owned(txq->txq_lock));
   8292 
   8293 	/* Initialize the transmit descriptor ring. */
   8294 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8295 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8296 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8297 	txq->txq_free = WM_NTXDESC(txq);
   8298 	txq->txq_next = 0;
   8299 }
   8300 
   8301 static void
   8302 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8303     struct wm_txqueue *txq)
   8304 {
   8305 
   8306 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8307 		device_xname(sc->sc_dev), __func__));
   8308 	KASSERT(mutex_owned(txq->txq_lock));
   8309 
   8310 	if (sc->sc_type < WM_T_82543) {
   8311 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8312 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8313 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8314 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8315 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8316 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8317 	} else {
   8318 		int qid = wmq->wmq_id;
   8319 
   8320 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8321 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8322 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8323 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8324 
   8325 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8326 			/*
   8327 			 * Don't write TDT before TCTL.EN is set.
    8328 			 * See the documentation.
   8329 			 */
   8330 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8331 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8332 			    | TXDCTL_WTHRESH(0));
   8333 		else {
   8334 			/* XXX should update with AIM? */
   8335 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8336 			if (sc->sc_type >= WM_T_82540) {
   8337 				/* Should be the same */
   8338 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8339 			}
   8340 
   8341 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8342 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8343 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8344 		}
   8345 	}
   8346 }
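
/*
 * A sketch of how WM_CDTXADDR_HI()/WM_CDTXADDR_LO() above split the
 * 64-bit physical address of a descriptor ring across the BAH/BAL
 * register pair ("pa" is a hypothetical ring address):
 */
#if 0
static void
example_write_ring_base(struct wm_softc *sc, int qid, uint64_t pa)
{

	CSR_WRITE(sc, WMREG_TDBAH(qid), (uint32_t)(pa >> 32));
	CSR_WRITE(sc, WMREG_TDBAL(qid), (uint32_t)(pa & 0xffffffffU));
}
#endif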
   8347 
   8348 static void
   8349 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8350 {
   8351 	int i;
   8352 
   8353 	KASSERT(mutex_owned(txq->txq_lock));
   8354 
   8355 	/* Initialize the transmit job descriptors. */
   8356 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8357 		txq->txq_soft[i].txs_mbuf = NULL;
   8358 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8359 	txq->txq_snext = 0;
   8360 	txq->txq_sdirty = 0;
   8361 }
   8362 
   8363 static void
   8364 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8365     struct wm_txqueue *txq)
   8366 {
   8367 
   8368 	KASSERT(mutex_owned(txq->txq_lock));
   8369 
   8370 	/*
   8371 	 * Set up some register offsets that are different between
   8372 	 * the i82542 and the i82543 and later chips.
   8373 	 */
   8374 	if (sc->sc_type < WM_T_82543)
   8375 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8376 	else
   8377 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8378 
   8379 	wm_init_tx_descs(sc, txq);
   8380 	wm_init_tx_regs(sc, wmq, txq);
   8381 	wm_init_tx_buffer(sc, txq);
   8382 
   8383 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   8384 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8385 
   8386 	txq->txq_sending = false;
   8387 }
   8388 
   8389 static void
   8390 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8391     struct wm_rxqueue *rxq)
   8392 {
   8393 
   8394 	KASSERT(mutex_owned(rxq->rxq_lock));
   8395 
   8396 	/*
   8397 	 * Initialize the receive descriptor and receive job
   8398 	 * descriptor rings.
   8399 	 */
   8400 	if (sc->sc_type < WM_T_82543) {
   8401 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8402 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8403 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8404 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8405 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8406 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8407 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8408 
   8409 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8410 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8411 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8412 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8413 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8414 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8415 	} else {
   8416 		int qid = wmq->wmq_id;
   8417 
   8418 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8419 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8420 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8421 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8422 
   8423 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8424 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8425 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8426 				    "or higher\n", __func__, MCLBYTES);
   8427 
   8428 			/*
   8429 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   8430 			 * only.
   8431 			 */
   8432 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8433 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8434 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8435 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8436 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8437 			    | RXDCTL_WTHRESH(1));
   8438 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8439 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8440 		} else {
   8441 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8442 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8443 			/* XXX should update with AIM? */
   8444 			CSR_WRITE(sc, WMREG_RDTR,
   8445 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   8446 			/* MUST be same */
   8447 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8448 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8449 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8450 		}
   8451 	}
   8452 }
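
/*
 * Example for the SRRCTL programming above: assuming
 * SRRCTL_BSIZEPKT_SHIFT is 10 (i.e. the packet buffer size field is in
 * 1KB units), MCLBYTES = 2048 yields a BSIZEPKT field of 2, and any
 * MCLBYTES that is not a multiple of 1KB triggers the panic above.
 */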
   8453 
   8454 static int
   8455 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8456 {
   8457 	struct wm_rxsoft *rxs;
   8458 	int error, i;
   8459 
   8460 	KASSERT(mutex_owned(rxq->rxq_lock));
   8461 
   8462 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8463 		rxs = &rxq->rxq_soft[i];
   8464 		if (rxs->rxs_mbuf == NULL) {
   8465 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8466 				log(LOG_ERR, "%s: unable to allocate or map "
   8467 				    "rx buffer %d, error = %d\n",
   8468 				    device_xname(sc->sc_dev), i, error);
   8469 				/*
   8470 				 * XXX Should attempt to run with fewer receive
   8471 				 * XXX buffers instead of just failing.
   8472 				 */
   8473 				wm_rxdrain(rxq);
   8474 				return ENOMEM;
   8475 			}
   8476 		} else {
   8477 			/*
   8478 			 * For 82575 and 82576, the RX descriptors must be
   8479 			 * initialized after the setting of RCTL.EN in
   8480 			 * wm_set_filter()
   8481 			 */
   8482 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8483 				wm_init_rxdesc(rxq, i);
   8484 		}
   8485 	}
   8486 	rxq->rxq_ptr = 0;
   8487 	rxq->rxq_discard = 0;
   8488 	WM_RXCHAIN_RESET(rxq);
   8489 
   8490 	return 0;
   8491 }
   8492 
   8493 static int
   8494 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8495     struct wm_rxqueue *rxq)
   8496 {
   8497 
   8498 	KASSERT(mutex_owned(rxq->rxq_lock));
   8499 
   8500 	/*
   8501 	 * Set up some register offsets that are different between
   8502 	 * the i82542 and the i82543 and later chips.
   8503 	 */
   8504 	if (sc->sc_type < WM_T_82543)
   8505 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8506 	else
   8507 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8508 
   8509 	wm_init_rx_regs(sc, wmq, rxq);
   8510 	return wm_init_rx_buffer(sc, rxq);
   8511 }
   8512 
   8513 /*
    8514  * wm_init_txrx_queues:
   8515  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8516  */
   8517 static int
   8518 wm_init_txrx_queues(struct wm_softc *sc)
   8519 {
   8520 	int i, error = 0;
   8521 
   8522 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8523 		device_xname(sc->sc_dev), __func__));
   8524 
   8525 	for (i = 0; i < sc->sc_nqueues; i++) {
   8526 		struct wm_queue *wmq = &sc->sc_queue[i];
   8527 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8528 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8529 
   8530 		/*
   8531 		 * TODO
    8532 		 * Currently, we use a constant ITR value instead of AIM.
    8533 		 * Furthermore, the interrupt interval in the multiqueue
    8534 		 * (polling mode) case is lower than the default value.
    8535 		 * More tuning and AIM support are required.
   8536 		 */
   8537 		if (wm_is_using_multiqueue(sc))
   8538 			wmq->wmq_itr = 50;
   8539 		else
   8540 			wmq->wmq_itr = sc->sc_itr_init;
   8541 		wmq->wmq_set_itr = true;
   8542 
   8543 		mutex_enter(txq->txq_lock);
   8544 		wm_init_tx_queue(sc, wmq, txq);
   8545 		mutex_exit(txq->txq_lock);
   8546 
   8547 		mutex_enter(rxq->rxq_lock);
   8548 		error = wm_init_rx_queue(sc, wmq, rxq);
   8549 		mutex_exit(rxq->rxq_lock);
   8550 		if (error)
   8551 			break;
   8552 	}
   8553 
   8554 	return error;
   8555 }
   8556 
   8557 /*
   8558  * wm_tx_offload:
   8559  *
   8560  *	Set up TCP/IP checksumming parameters for the
   8561  *	specified packet.
   8562  */
   8563 static void
   8564 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8565     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8566 {
   8567 	struct mbuf *m0 = txs->txs_mbuf;
   8568 	struct livengood_tcpip_ctxdesc *t;
   8569 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8570 	uint32_t ipcse;
   8571 	struct ether_header *eh;
   8572 	int offset, iphl;
   8573 	uint8_t fields;
   8574 
   8575 	/*
   8576 	 * XXX It would be nice if the mbuf pkthdr had offset
   8577 	 * fields for the protocol headers.
   8578 	 */
   8579 
   8580 	eh = mtod(m0, struct ether_header *);
   8581 	switch (htons(eh->ether_type)) {
   8582 	case ETHERTYPE_IP:
   8583 	case ETHERTYPE_IPV6:
   8584 		offset = ETHER_HDR_LEN;
   8585 		break;
   8586 
   8587 	case ETHERTYPE_VLAN:
   8588 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8589 		break;
   8590 
   8591 	default:
   8592 		/* Don't support this protocol or encapsulation. */
   8593 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8594 		txq->txq_last_hw_ipcs = 0;
   8595 		txq->txq_last_hw_tucs = 0;
   8596 		*fieldsp = 0;
   8597 		*cmdp = 0;
   8598 		return;
   8599 	}
   8600 
   8601 	if ((m0->m_pkthdr.csum_flags &
   8602 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8603 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8604 	} else
   8605 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8606 
   8607 	ipcse = offset + iphl - 1;
   8608 
   8609 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8610 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8611 	seg = 0;
   8612 	fields = 0;
   8613 
   8614 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8615 		int hlen = offset + iphl;
   8616 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8617 
   8618 		if (__predict_false(m0->m_len <
   8619 				    (hlen + sizeof(struct tcphdr)))) {
   8620 			/*
   8621 			 * TCP/IP headers are not in the first mbuf; we need
   8622 			 * to do this the slow and painful way. Let's just
   8623 			 * hope this doesn't happen very often.
   8624 			 */
   8625 			struct tcphdr th;
   8626 
   8627 			WM_Q_EVCNT_INCR(txq, tsopain);
   8628 
   8629 			m_copydata(m0, hlen, sizeof(th), &th);
   8630 			if (v4) {
   8631 				struct ip ip;
   8632 
   8633 				m_copydata(m0, offset, sizeof(ip), &ip);
   8634 				ip.ip_len = 0;
   8635 				m_copyback(m0,
   8636 				    offset + offsetof(struct ip, ip_len),
   8637 				    sizeof(ip.ip_len), &ip.ip_len);
   8638 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8639 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8640 			} else {
   8641 				struct ip6_hdr ip6;
   8642 
   8643 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8644 				ip6.ip6_plen = 0;
   8645 				m_copyback(m0,
   8646 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8647 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8648 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8649 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8650 			}
   8651 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8652 			    sizeof(th.th_sum), &th.th_sum);
   8653 
   8654 			hlen += th.th_off << 2;
   8655 		} else {
   8656 			/*
   8657 			 * TCP/IP headers are in the first mbuf; we can do
   8658 			 * this the easy way.
   8659 			 */
   8660 			struct tcphdr *th;
   8661 
   8662 			if (v4) {
   8663 				struct ip *ip =
   8664 				    (void *)(mtod(m0, char *) + offset);
   8665 				th = (void *)(mtod(m0, char *) + hlen);
   8666 
   8667 				ip->ip_len = 0;
   8668 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8669 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8670 			} else {
   8671 				struct ip6_hdr *ip6 =
   8672 				    (void *)(mtod(m0, char *) + offset);
   8673 				th = (void *)(mtod(m0, char *) + hlen);
   8674 
   8675 				ip6->ip6_plen = 0;
   8676 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8677 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8678 			}
   8679 			hlen += th->th_off << 2;
   8680 		}
   8681 
   8682 		if (v4) {
   8683 			WM_Q_EVCNT_INCR(txq, tso);
   8684 			cmdlen |= WTX_TCPIP_CMD_IP;
   8685 		} else {
   8686 			WM_Q_EVCNT_INCR(txq, tso6);
   8687 			ipcse = 0;
   8688 		}
   8689 		cmd |= WTX_TCPIP_CMD_TSE;
   8690 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8691 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8692 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8693 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8694 	}
   8695 
   8696 	/*
   8697 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8698 	 * offload feature, if we load the context descriptor, we
   8699 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8700 	 */
   8701 
   8702 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8703 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8704 	    WTX_TCPIP_IPCSE(ipcse);
   8705 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8706 		WM_Q_EVCNT_INCR(txq, ipsum);
   8707 		fields |= WTX_IXSM;
   8708 	}
   8709 
   8710 	offset += iphl;
   8711 
   8712 	if (m0->m_pkthdr.csum_flags &
   8713 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8714 		WM_Q_EVCNT_INCR(txq, tusum);
   8715 		fields |= WTX_TXSM;
   8716 		tucs = WTX_TCPIP_TUCSS(offset) |
   8717 		    WTX_TCPIP_TUCSO(offset +
   8718 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8719 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8720 	} else if ((m0->m_pkthdr.csum_flags &
   8721 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8722 		WM_Q_EVCNT_INCR(txq, tusum6);
   8723 		fields |= WTX_TXSM;
   8724 		tucs = WTX_TCPIP_TUCSS(offset) |
   8725 		    WTX_TCPIP_TUCSO(offset +
   8726 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8727 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8728 	} else {
   8729 		/* Just initialize it to a valid TCP context. */
   8730 		tucs = WTX_TCPIP_TUCSS(offset) |
   8731 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8732 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8733 	}
   8734 
   8735 	*cmdp = cmd;
   8736 	*fieldsp = fields;
   8737 
   8738 	/*
    8739 	 * We don't have to write a context descriptor for every packet,
    8740 	 * except on the 82574. For the 82574, we must write a context
    8741 	 * descriptor for every packet when we use two descriptor queues.
    8742 	 *
    8743 	 * The 82574L can only remember the *last* context used
    8744 	 * regardless of the queue it was used for.  We cannot reuse
   8745 	 * contexts on this hardware platform and must generate a new
   8746 	 * context every time.  82574L hardware spec, section 7.2.6,
   8747 	 * second note.
   8748 	 */
   8749 	if (sc->sc_nqueues < 2) {
   8750 		/*
    8751 		 * Setting up a new checksum offload context for every
    8752 		 * frame takes a lot of processing time for the hardware.
    8753 		 * This also reduces performance a lot for small frames,
    8754 		 * so avoid it if the driver can use a previously
    8755 		 * configured checksum offload context.
    8756 		 * For TSO, in theory we could reuse the same TSO context
    8757 		 * only if the frame is the same type (IP/TCP) and has the
    8758 		 * same MSS. However, checking whether a frame has the same
    8759 		 * IP/TCP structure is hard, so just ignore that and always
    8760 		 * re-establish a new TSO context.
   8761 		 */
   8762 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8763 		    == 0) {
   8764 			if (txq->txq_last_hw_cmd == cmd &&
   8765 			    txq->txq_last_hw_fields == fields &&
   8766 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8767 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8768 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8769 				return;
   8770 			}
   8771 		}
   8772 
   8773 		txq->txq_last_hw_cmd = cmd;
   8774 		txq->txq_last_hw_fields = fields;
   8775 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8776 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8777 	}
   8778 
   8779 	/* Fill in the context descriptor. */
   8780 	t = (struct livengood_tcpip_ctxdesc *)
   8781 	    &txq->txq_descs[txq->txq_next];
   8782 	t->tcpip_ipcs = htole32(ipcs);
   8783 	t->tcpip_tucs = htole32(tucs);
   8784 	t->tcpip_cmdlen = htole32(cmdlen);
   8785 	t->tcpip_seg = htole32(seg);
   8786 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8787 
   8788 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8789 	txs->txs_ndesc++;
   8790 }
   8791 
   8792 static inline int
   8793 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8794 {
   8795 	struct wm_softc *sc = ifp->if_softc;
   8796 	u_int cpuid = cpu_index(curcpu());
   8797 
   8798 	/*
    8799 	 * Currently, a simple distribution strategy.
    8800 	 * TODO:
    8801 	 * Distribute by flowid (RSS hash value).
   8802 	 */
   8803 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8804 }
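
/*
 * Example for the mapping above: with ncpu = 8, sc_affinity_offset = 2
 * and sc_nqueues = 4, a sender running on cpuid 1 maps to
 * ((1 + 8 - 2) % 8) % 4 = 3, so consecutive CPU indices are spread
 * round-robin across the Tx queues.
 */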
   8805 
   8806 static inline bool
   8807 wm_linkdown_discard(struct wm_txqueue *txq)
   8808 {
   8809 
   8810 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8811 		return true;
   8812 
   8813 	return false;
   8814 }
   8815 
   8816 /*
   8817  * wm_start:		[ifnet interface function]
   8818  *
   8819  *	Start packet transmission on the interface.
   8820  */
   8821 static void
   8822 wm_start(struct ifnet *ifp)
   8823 {
   8824 	struct wm_softc *sc = ifp->if_softc;
   8825 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8826 
   8827 	KASSERT(if_is_mpsafe(ifp));
   8828 	/*
   8829 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8830 	 */
   8831 
   8832 	mutex_enter(txq->txq_lock);
   8833 	if (!txq->txq_stopping)
   8834 		wm_start_locked(ifp);
   8835 	mutex_exit(txq->txq_lock);
   8836 }
   8837 
   8838 static void
   8839 wm_start_locked(struct ifnet *ifp)
   8840 {
   8841 	struct wm_softc *sc = ifp->if_softc;
   8842 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8843 
   8844 	wm_send_common_locked(ifp, txq, false);
   8845 }
   8846 
   8847 static int
   8848 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8849 {
   8850 	int qid;
   8851 	struct wm_softc *sc = ifp->if_softc;
   8852 	struct wm_txqueue *txq;
   8853 
   8854 	qid = wm_select_txqueue(ifp, m);
   8855 	txq = &sc->sc_queue[qid].wmq_txq;
   8856 
   8857 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8858 		m_freem(m);
   8859 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8860 		return ENOBUFS;
   8861 	}
   8862 
   8863 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8864 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8865 	if (m->m_flags & M_MCAST)
   8866 		if_statinc_ref(nsr, if_omcasts);
   8867 	IF_STAT_PUTREF(ifp);
   8868 
   8869 	if (mutex_tryenter(txq->txq_lock)) {
   8870 		if (!txq->txq_stopping)
   8871 			wm_transmit_locked(ifp, txq);
   8872 		mutex_exit(txq->txq_lock);
   8873 	}
   8874 
   8875 	return 0;
   8876 }
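
/*
 * Note on the mutex_tryenter() above: if the lock is busy, the packet
 * stays on txq_interq and is picked up by the next context that calls
 * wm_send_common_locked() on this queue (a later wm_transmit() or the
 * deferred Tx processing), so it is not stranded.
 */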
   8877 
   8878 static void
   8879 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8880 {
   8881 
   8882 	wm_send_common_locked(ifp, txq, true);
   8883 }
   8884 
   8885 static void
   8886 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8887     bool is_transmit)
   8888 {
   8889 	struct wm_softc *sc = ifp->if_softc;
   8890 	struct mbuf *m0;
   8891 	struct wm_txsoft *txs;
   8892 	bus_dmamap_t dmamap;
   8893 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8894 	bus_addr_t curaddr;
   8895 	bus_size_t seglen, curlen;
   8896 	uint32_t cksumcmd;
   8897 	uint8_t cksumfields;
   8898 	bool remap = true;
   8899 
   8900 	KASSERT(mutex_owned(txq->txq_lock));
   8901 	KASSERT(!txq->txq_stopping);
   8902 
   8903 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8904 		return;
   8905 
   8906 	if (__predict_false(wm_linkdown_discard(txq))) {
   8907 		do {
   8908 			if (is_transmit)
   8909 				m0 = pcq_get(txq->txq_interq);
   8910 			else
   8911 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8912 			/*
    8913 			 * Increment the successful packet counter even in
    8914 			 * the case where the packet is discarded by a link-down PHY.
   8915 			 */
   8916 			if (m0 != NULL) {
   8917 				if_statinc(ifp, if_opackets);
   8918 				m_freem(m0);
   8919 			}
   8920 		} while (m0 != NULL);
   8921 		return;
   8922 	}
   8923 
   8924 	/* Remember the previous number of free descriptors. */
   8925 	ofree = txq->txq_free;
   8926 
   8927 	/*
   8928 	 * Loop through the send queue, setting up transmit descriptors
   8929 	 * until we drain the queue, or use up all available transmit
   8930 	 * descriptors.
   8931 	 */
   8932 	for (;;) {
   8933 		m0 = NULL;
   8934 
   8935 		/* Get a work queue entry. */
   8936 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8937 			wm_txeof(txq, UINT_MAX);
   8938 			if (txq->txq_sfree == 0) {
   8939 				DPRINTF(sc, WM_DEBUG_TX,
   8940 				    ("%s: TX: no free job descriptors\n",
   8941 					device_xname(sc->sc_dev)));
   8942 				WM_Q_EVCNT_INCR(txq, txsstall);
   8943 				break;
   8944 			}
   8945 		}
   8946 
   8947 		/* Grab a packet off the queue. */
   8948 		if (is_transmit)
   8949 			m0 = pcq_get(txq->txq_interq);
   8950 		else
   8951 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8952 		if (m0 == NULL)
   8953 			break;
   8954 
   8955 		DPRINTF(sc, WM_DEBUG_TX,
   8956 		    ("%s: TX: have packet to transmit: %p\n",
   8957 			device_xname(sc->sc_dev), m0));
   8958 
   8959 		txs = &txq->txq_soft[txq->txq_snext];
   8960 		dmamap = txs->txs_dmamap;
   8961 
   8962 		use_tso = (m0->m_pkthdr.csum_flags &
   8963 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8964 
   8965 		/*
   8966 		 * So says the Linux driver:
   8967 		 * The controller does a simple calculation to make sure
   8968 		 * there is enough room in the FIFO before initiating the
   8969 		 * DMA for each buffer. The calc is:
   8970 		 *	4 = ceil(buffer len / MSS)
   8971 		 * To make sure we don't overrun the FIFO, adjust the max
   8972 		 * buffer len if the MSS drops.
   8973 		 */
   8974 		dmamap->dm_maxsegsz =
   8975 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8976 		    ? m0->m_pkthdr.segsz << 2
   8977 		    : WTX_MAX_LEN;
   8978 
   8979 		/*
   8980 		 * Load the DMA map.  If this fails, the packet either
   8981 		 * didn't fit in the allotted number of segments, or we
   8982 		 * were short on resources.  For the too-many-segments
   8983 		 * case, we simply report an error and drop the packet,
   8984 		 * since we can't sanely copy a jumbo packet to a single
   8985 		 * buffer.
   8986 		 */
   8987 retry:
   8988 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8989 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8990 		if (__predict_false(error)) {
   8991 			if (error == EFBIG) {
   8992 				if (remap == true) {
   8993 					struct mbuf *m;
   8994 
   8995 					remap = false;
   8996 					m = m_defrag(m0, M_NOWAIT);
   8997 					if (m != NULL) {
   8998 						WM_Q_EVCNT_INCR(txq, defrag);
   8999 						m0 = m;
   9000 						goto retry;
   9001 					}
   9002 				}
   9003 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9004 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9005 				    "DMA segments, dropping...\n",
   9006 				    device_xname(sc->sc_dev));
   9007 				wm_dump_mbuf_chain(sc, m0);
   9008 				m_freem(m0);
   9009 				continue;
   9010 			}
   9011 			/* Short on resources, just stop for now. */
   9012 			DPRINTF(sc, WM_DEBUG_TX,
   9013 			    ("%s: TX: dmamap load failed: %d\n",
   9014 				device_xname(sc->sc_dev), error));
   9015 			break;
   9016 		}
   9017 
   9018 		segs_needed = dmamap->dm_nsegs;
   9019 		if (use_tso) {
   9020 			/* For sentinel descriptor; see below. */
   9021 			segs_needed++;
   9022 		}
   9023 
   9024 		/*
   9025 		 * Ensure we have enough descriptors free to describe
   9026 		 * the packet. Note, we always reserve one descriptor
   9027 		 * at the end of the ring due to the semantics of the
   9028 		 * TDT register, plus one more in the event we need
   9029 		 * to load offload context.
   9030 		 */
   9031 		if (segs_needed > txq->txq_free - 2) {
   9032 			/*
   9033 			 * Not enough free descriptors to transmit this
   9034 			 * packet.  We haven't committed anything yet,
   9035 			 * so just unload the DMA map, put the packet
    9036 			 * back on the queue, and punt. Notify the upper
   9037 			 * layer that there are no more slots left.
   9038 			 */
   9039 			DPRINTF(sc, WM_DEBUG_TX,
   9040 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9041 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9042 				segs_needed, txq->txq_free - 1));
   9043 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9044 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9045 			WM_Q_EVCNT_INCR(txq, txdstall);
   9046 			break;
   9047 		}
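
		/*
		 * Example for the check above: with txq_free = 8, a packet
		 * needing 7 descriptors is refused (7 > 8 - 2), keeping one
		 * slot free for the TDT semantics and one for a possible
		 * offload context descriptor.
		 */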
   9048 
   9049 		/*
   9050 		 * Check for 82547 Tx FIFO bug. We need to do this
   9051 		 * once we know we can transmit the packet, since we
   9052 		 * do some internal FIFO space accounting here.
   9053 		 */
   9054 		if (sc->sc_type == WM_T_82547 &&
   9055 		    wm_82547_txfifo_bugchk(sc, m0)) {
   9056 			DPRINTF(sc, WM_DEBUG_TX,
   9057 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   9058 				device_xname(sc->sc_dev)));
   9059 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9060 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9061 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   9062 			break;
   9063 		}
   9064 
   9065 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9066 
   9067 		DPRINTF(sc, WM_DEBUG_TX,
   9068 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9069 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9070 
   9071 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9072 
   9073 		/*
   9074 		 * Store a pointer to the packet so that we can free it
   9075 		 * later.
   9076 		 *
   9077 		 * Initially, we consider the number of descriptors the
   9078 		 * packet uses the number of DMA segments.  This may be
   9079 		 * incremented by 1 if we do checksum offload (a descriptor
   9080 		 * is used to set the checksum context).
   9081 		 */
   9082 		txs->txs_mbuf = m0;
   9083 		txs->txs_firstdesc = txq->txq_next;
   9084 		txs->txs_ndesc = segs_needed;
   9085 
   9086 		/* Set up offload parameters for this packet. */
   9087 		if (m0->m_pkthdr.csum_flags &
   9088 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9089 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9090 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9091 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9092 		} else {
   9093 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9094 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9095 			cksumcmd = 0;
   9096 			cksumfields = 0;
   9097 		}
   9098 
   9099 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9100 
   9101 		/* Sync the DMA map. */
   9102 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9103 		    BUS_DMASYNC_PREWRITE);
   9104 
   9105 		/* Initialize the transmit descriptor. */
   9106 		for (nexttx = txq->txq_next, seg = 0;
   9107 		     seg < dmamap->dm_nsegs; seg++) {
   9108 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9109 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9110 			     seglen != 0;
   9111 			     curaddr += curlen, seglen -= curlen,
   9112 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9113 				curlen = seglen;
   9114 
   9115 				/*
   9116 				 * So says the Linux driver:
   9117 				 * Work around for premature descriptor
   9118 				 * write-backs in TSO mode.  Append a
   9119 				 * 4-byte sentinel descriptor.
   9120 				 */
   9121 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9122 				    curlen > 8)
   9123 					curlen -= 4;
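				/*
				 * Shortening curlen here leaves 4 bytes of
				 * the final segment unconsumed, so the
				 * enclosing loop emits one extra 4-byte
				 * descriptor: the sentinel.
				 */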
   9124 
   9125 				wm_set_dma_addr(
   9126 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9127 				txq->txq_descs[nexttx].wtx_cmdlen
   9128 				    = htole32(cksumcmd | curlen);
   9129 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9130 				    = 0;
   9131 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9132 				    = cksumfields;
   9133 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9134 				lasttx = nexttx;
   9135 
   9136 				DPRINTF(sc, WM_DEBUG_TX,
   9137 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9138 					"len %#04zx\n",
   9139 					device_xname(sc->sc_dev), nexttx,
   9140 					(uint64_t)curaddr, curlen));
   9141 			}
   9142 		}
   9143 
   9144 		KASSERT(lasttx != -1);
   9145 
   9146 		/*
   9147 		 * Set up the command byte on the last descriptor of
   9148 		 * the packet. If we're in the interrupt delay window,
   9149 		 * delay the interrupt.
   9150 		 */
   9151 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9152 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9153 
   9154 		/*
   9155 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9156 		 * up the descriptor to encapsulate the packet for us.
   9157 		 *
   9158 		 * This is only valid on the last descriptor of the packet.
   9159 		 */
   9160 		if (vlan_has_tag(m0)) {
   9161 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9162 			    htole32(WTX_CMD_VLE);
   9163 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9164 			    = htole16(vlan_get_tag(m0));
   9165 		}
   9166 
   9167 		txs->txs_lastdesc = lasttx;
   9168 
   9169 		DPRINTF(sc, WM_DEBUG_TX,
   9170 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9171 			device_xname(sc->sc_dev),
   9172 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9173 
   9174 		/* Sync the descriptors we're using. */
   9175 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9176 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9177 
   9178 		/* Give the packet to the chip. */
   9179 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9180 
   9181 		DPRINTF(sc, WM_DEBUG_TX,
   9182 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9183 
   9184 		DPRINTF(sc, WM_DEBUG_TX,
   9185 		    ("%s: TX: finished transmitting packet, job %d\n",
   9186 			device_xname(sc->sc_dev), txq->txq_snext));
   9187 
   9188 		/* Advance the tx pointer. */
   9189 		txq->txq_free -= txs->txs_ndesc;
   9190 		txq->txq_next = nexttx;
   9191 
   9192 		txq->txq_sfree--;
   9193 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9194 
   9195 		/* Pass the packet to any BPF listeners. */
   9196 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9197 	}
   9198 
   9199 	if (m0 != NULL) {
   9200 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9201 		WM_Q_EVCNT_INCR(txq, descdrop);
   9202 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9203 			__func__));
   9204 		m_freem(m0);
   9205 	}
   9206 
   9207 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9208 		/* No more slots; notify upper layer. */
   9209 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9210 	}
   9211 
   9212 	if (txq->txq_free != ofree) {
   9213 		/* Set a watchdog timer in case the chip flakes out. */
   9214 		txq->txq_lastsent = time_uptime;
   9215 		txq->txq_sending = true;
   9216 	}
   9217 }
   9218 
   9219 /*
   9220  * wm_nq_tx_offload:
   9221  *
   9222  *	Set up TCP/IP checksumming parameters for the
   9223  *	specified packet, for NEWQUEUE devices
   9224  */
   9225 static void
   9226 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9227     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9228 {
   9229 	struct mbuf *m0 = txs->txs_mbuf;
   9230 	uint32_t vl_len, mssidx, cmdc;
   9231 	struct ether_header *eh;
   9232 	int offset, iphl;
   9233 
   9234 	/*
   9235 	 * XXX It would be nice if the mbuf pkthdr had offset
   9236 	 * fields for the protocol headers.
   9237 	 */
   9238 	*cmdlenp = 0;
   9239 	*fieldsp = 0;
   9240 
   9241 	eh = mtod(m0, struct ether_header *);
   9242 	switch (htons(eh->ether_type)) {
   9243 	case ETHERTYPE_IP:
   9244 	case ETHERTYPE_IPV6:
   9245 		offset = ETHER_HDR_LEN;
   9246 		break;
   9247 
   9248 	case ETHERTYPE_VLAN:
   9249 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9250 		break;
   9251 
   9252 	default:
   9253 		/* Don't support this protocol or encapsulation. */
   9254 		*do_csum = false;
   9255 		return;
   9256 	}
   9257 	*do_csum = true;
   9258 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9259 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   9260 
   9261 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9262 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9263 
   9264 	if ((m0->m_pkthdr.csum_flags &
   9265 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9266 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9267 	} else {
   9268 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9269 	}
   9270 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9271 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   9272 
   9273 	if (vlan_has_tag(m0)) {
   9274 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9275 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9276 		*cmdlenp |= NQTX_CMD_VLE;
   9277 	}
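         	/*
         	 * Example of the packing above (assuming plain IPv4/TCP with
         	 * no VLAN tag): offset == 14 (ETHER_HDR_LEN) and iphl == 20
         	 * give
         	 *
         	 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
         	 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
         	 *
         	 * and a VLAN tag, when present, is folded in at
         	 * NQTXC_VLLEN_VLAN_SHIFT as done above.
         	 */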
   9278 
   9279 	mssidx = 0;
   9280 
   9281 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9282 		int hlen = offset + iphl;
   9283 		int tcp_hlen;
   9284 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9285 
   9286 		if (__predict_false(m0->m_len <
   9287 				    (hlen + sizeof(struct tcphdr)))) {
   9288 			/*
   9289 			 * TCP/IP headers are not in the first mbuf; we need
   9290 			 * to do this the slow and painful way. Let's just
   9291 			 * hope this doesn't happen very often.
   9292 			 */
   9293 			struct tcphdr th;
   9294 
   9295 			WM_Q_EVCNT_INCR(txq, tsopain);
   9296 
   9297 			m_copydata(m0, hlen, sizeof(th), &th);
   9298 			if (v4) {
   9299 				struct ip ip;
   9300 
   9301 				m_copydata(m0, offset, sizeof(ip), &ip);
   9302 				ip.ip_len = 0;
   9303 				m_copyback(m0,
   9304 				    offset + offsetof(struct ip, ip_len),
   9305 				    sizeof(ip.ip_len), &ip.ip_len);
   9306 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9307 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9308 			} else {
   9309 				struct ip6_hdr ip6;
   9310 
   9311 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9312 				ip6.ip6_plen = 0;
   9313 				m_copyback(m0,
   9314 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9315 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9316 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9317 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9318 			}
   9319 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9320 			    sizeof(th.th_sum), &th.th_sum);
   9321 
   9322 			tcp_hlen = th.th_off << 2;
   9323 		} else {
   9324 			/*
   9325 			 * TCP/IP headers are in the first mbuf; we can do
   9326 			 * this the easy way.
   9327 			 */
   9328 			struct tcphdr *th;
   9329 
   9330 			if (v4) {
   9331 				struct ip *ip =
   9332 				    (void *)(mtod(m0, char *) + offset);
   9333 				th = (void *)(mtod(m0, char *) + hlen);
   9334 
   9335 				ip->ip_len = 0;
   9336 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9337 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9338 			} else {
   9339 				struct ip6_hdr *ip6 =
   9340 				    (void *)(mtod(m0, char *) + offset);
   9341 				th = (void *)(mtod(m0, char *) + hlen);
   9342 
   9343 				ip6->ip6_plen = 0;
   9344 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9345 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9346 			}
   9347 			tcp_hlen = th->th_off << 2;
   9348 		}
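         		/*
         		 * In both paths above the IP length field is zeroed and
         		 * th_sum is seeded with a pseudo-header checksum over
         		 * only the addresses and protocol; the hardware then
         		 * folds the per-segment payload length into the checksum
         		 * as it carves out each TSO segment.
         		 */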
   9349 		hlen += tcp_hlen;
   9350 		*cmdlenp |= NQTX_CMD_TSE;
   9351 
   9352 		if (v4) {
   9353 			WM_Q_EVCNT_INCR(txq, tso);
   9354 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9355 		} else {
   9356 			WM_Q_EVCNT_INCR(txq, tso6);
   9357 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9358 		}
   9359 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9360 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9361 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9362 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9363 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9364 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9365 	} else {
   9366 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9367 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9368 	}
   9369 
   9370 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9371 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9372 		cmdc |= NQTXC_CMD_IP4;
   9373 	}
   9374 
   9375 	if (m0->m_pkthdr.csum_flags &
   9376 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9377 		WM_Q_EVCNT_INCR(txq, tusum);
   9378 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9379 			cmdc |= NQTXC_CMD_TCP;
   9380 		else
   9381 			cmdc |= NQTXC_CMD_UDP;
   9382 
   9383 		cmdc |= NQTXC_CMD_IP4;
   9384 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9385 	}
   9386 	if (m0->m_pkthdr.csum_flags &
   9387 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9388 		WM_Q_EVCNT_INCR(txq, tusum6);
   9389 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9390 			cmdc |= NQTXC_CMD_TCP;
   9391 		else
   9392 			cmdc |= NQTXC_CMD_UDP;
   9393 
   9394 		cmdc |= NQTXC_CMD_IP6;
   9395 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9396 	}
   9397 
    9398 	/*
    9399 	 * We don't have to write a context descriptor for every packet
    9400 	 * on NEWQUEUE controllers, that is, 82575, 82576, 82580, I350,
    9401 	 * I354, I210 and I211; it is enough to write one per Tx queue
    9402 	 * for these controllers.
    9403 	 * Writing a context descriptor for every packet adds overhead,
    9404 	 * but it does not cause problems.
    9405 	 */
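         	/*
         	 * A minimal sketch of how the redundant write could be skipped
         	 * by caching the last context programmed on this queue; the
         	 * txq_last_nq_* fields are hypothetical, not part of this
         	 * driver (compare the txq_last_hw_* caching on the legacy
         	 * path):
         	 *
         	 *	if (txq->txq_last_nq_vl_len == vl_len &&
         	 *	    txq->txq_last_nq_cmdc == cmdc &&
         	 *	    txq->txq_last_nq_mssidx == mssidx)
         	 *		return;
         	 *	txq->txq_last_nq_vl_len = vl_len;
         	 *	txq->txq_last_nq_cmdc = cmdc;
         	 *	txq->txq_last_nq_mssidx = mssidx;
         	 */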
   9406 	/* Fill in the context descriptor. */
   9407 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9408 	    htole32(vl_len);
   9409 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9410 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9411 	    htole32(cmdc);
   9412 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9413 	    htole32(mssidx);
   9414 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9415 	DPRINTF(sc, WM_DEBUG_TX,
   9416 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9417 		txq->txq_next, 0, vl_len));
   9418 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9419 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9420 	txs->txs_ndesc++;
   9421 }
   9422 
   9423 /*
   9424  * wm_nq_start:		[ifnet interface function]
   9425  *
   9426  *	Start packet transmission on the interface for NEWQUEUE devices
   9427  */
   9428 static void
   9429 wm_nq_start(struct ifnet *ifp)
   9430 {
   9431 	struct wm_softc *sc = ifp->if_softc;
   9432 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9433 
   9434 	KASSERT(if_is_mpsafe(ifp));
   9435 	/*
   9436 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9437 	 */
   9438 
   9439 	mutex_enter(txq->txq_lock);
   9440 	if (!txq->txq_stopping)
   9441 		wm_nq_start_locked(ifp);
   9442 	mutex_exit(txq->txq_lock);
   9443 }
   9444 
   9445 static void
   9446 wm_nq_start_locked(struct ifnet *ifp)
   9447 {
   9448 	struct wm_softc *sc = ifp->if_softc;
   9449 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9450 
   9451 	wm_nq_send_common_locked(ifp, txq, false);
   9452 }
   9453 
   9454 static int
   9455 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9456 {
   9457 	int qid;
   9458 	struct wm_softc *sc = ifp->if_softc;
   9459 	struct wm_txqueue *txq;
   9460 
   9461 	qid = wm_select_txqueue(ifp, m);
   9462 	txq = &sc->sc_queue[qid].wmq_txq;
   9463 
   9464 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9465 		m_freem(m);
   9466 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9467 		return ENOBUFS;
   9468 	}
   9469 
   9470 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9471 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9472 	if (m->m_flags & M_MCAST)
   9473 		if_statinc_ref(nsr, if_omcasts);
   9474 	IF_STAT_PUTREF(ifp);
   9475 
    9476 	/*
    9477 	 * This mutex_tryenter() can fail at run time in two ways:
    9478 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9479 	 *     (2) contention with the deferred if_start softint
    9480 	 *         (wm_handle_queue())
    9481 	 * In case (1), the last packet enqueued to txq->txq_interq is
    9482 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9483 	 * In case (2), the last packet enqueued to txq->txq_interq is
    9484 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    9485 	 * get stuck either.
    9486 	 */
   9487 	if (mutex_tryenter(txq->txq_lock)) {
   9488 		if (!txq->txq_stopping)
   9489 			wm_nq_transmit_locked(ifp, txq);
   9490 		mutex_exit(txq->txq_lock);
   9491 	}
   9492 
   9493 	return 0;
   9494 }
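         /*
          * The enqueue-then-tryenter pattern in wm_nq_transmit() above, shown
          * in isolation (a sketch with hypothetical names, not driver code):
          * the producer never blocks, and losing the mutex_tryenter() race is
          * safe because whichever context holds the lock drains the pcq
          * before releasing it.
          *
          *	pcq_put(q->interq, m);
          *	if (mutex_tryenter(q->lock)) {
          *		while ((m = pcq_get(q->interq)) != NULL)
          *			process(m);
          *		mutex_exit(q->lock);
          *	}
          */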
   9495 
   9496 static void
   9497 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9498 {
   9499 
   9500 	wm_nq_send_common_locked(ifp, txq, true);
   9501 }
   9502 
   9503 static void
   9504 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9505     bool is_transmit)
   9506 {
   9507 	struct wm_softc *sc = ifp->if_softc;
   9508 	struct mbuf *m0;
   9509 	struct wm_txsoft *txs;
   9510 	bus_dmamap_t dmamap;
   9511 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9512 	bool do_csum, sent;
   9513 	bool remap = true;
   9514 
   9515 	KASSERT(mutex_owned(txq->txq_lock));
   9516 	KASSERT(!txq->txq_stopping);
   9517 
   9518 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9519 		return;
   9520 
   9521 	if (__predict_false(wm_linkdown_discard(txq))) {
   9522 		do {
   9523 			if (is_transmit)
   9524 				m0 = pcq_get(txq->txq_interq);
   9525 			else
   9526 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9527 			/*
    9528 			 * Increment the sent-packet counter even when the
    9529 			 * packet is discarded due to the link-down PHY.
    9530 			 */
   9531 			if (m0 != NULL) {
   9532 				if_statinc(ifp, if_opackets);
   9533 				m_freem(m0);
   9534 			}
   9535 		} while (m0 != NULL);
   9536 		return;
   9537 	}
   9538 
   9539 	sent = false;
   9540 
   9541 	/*
   9542 	 * Loop through the send queue, setting up transmit descriptors
   9543 	 * until we drain the queue, or use up all available transmit
   9544 	 * descriptors.
   9545 	 */
   9546 	for (;;) {
   9547 		m0 = NULL;
   9548 
   9549 		/* Get a work queue entry. */
   9550 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9551 			wm_txeof(txq, UINT_MAX);
   9552 			if (txq->txq_sfree == 0) {
   9553 				DPRINTF(sc, WM_DEBUG_TX,
   9554 				    ("%s: TX: no free job descriptors\n",
   9555 					device_xname(sc->sc_dev)));
   9556 				WM_Q_EVCNT_INCR(txq, txsstall);
   9557 				break;
   9558 			}
   9559 		}
   9560 
   9561 		/* Grab a packet off the queue. */
   9562 		if (is_transmit)
   9563 			m0 = pcq_get(txq->txq_interq);
   9564 		else
   9565 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9566 		if (m0 == NULL)
   9567 			break;
   9568 
   9569 		DPRINTF(sc, WM_DEBUG_TX,
   9570 		    ("%s: TX: have packet to transmit: %p\n",
   9571 			device_xname(sc->sc_dev), m0));
   9572 
   9573 		txs = &txq->txq_soft[txq->txq_snext];
   9574 		dmamap = txs->txs_dmamap;
   9575 
   9576 		/*
   9577 		 * Load the DMA map.  If this fails, the packet either
   9578 		 * didn't fit in the allotted number of segments, or we
   9579 		 * were short on resources.  For the too-many-segments
   9580 		 * case, we simply report an error and drop the packet,
   9581 		 * since we can't sanely copy a jumbo packet to a single
   9582 		 * buffer.
   9583 		 */
   9584 retry:
   9585 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9586 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9587 		if (__predict_false(error)) {
   9588 			if (error == EFBIG) {
   9589 				if (remap == true) {
   9590 					struct mbuf *m;
   9591 
   9592 					remap = false;
   9593 					m = m_defrag(m0, M_NOWAIT);
   9594 					if (m != NULL) {
   9595 						WM_Q_EVCNT_INCR(txq, defrag);
   9596 						m0 = m;
   9597 						goto retry;
   9598 					}
   9599 				}
   9600 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9601 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9602 				    "DMA segments, dropping...\n",
   9603 				    device_xname(sc->sc_dev));
   9604 				wm_dump_mbuf_chain(sc, m0);
   9605 				m_freem(m0);
   9606 				continue;
   9607 			}
   9608 			/* Short on resources, just stop for now. */
   9609 			DPRINTF(sc, WM_DEBUG_TX,
   9610 			    ("%s: TX: dmamap load failed: %d\n",
   9611 				device_xname(sc->sc_dev), error));
   9612 			break;
   9613 		}
   9614 
   9615 		segs_needed = dmamap->dm_nsegs;
   9616 
   9617 		/*
   9618 		 * Ensure we have enough descriptors free to describe
   9619 		 * the packet. Note, we always reserve one descriptor
   9620 		 * at the end of the ring due to the semantics of the
   9621 		 * TDT register, plus one more in the event we need
   9622 		 * to load offload context.
   9623 		 */
   9624 		if (segs_needed > txq->txq_free - 2) {
   9625 			/*
   9626 			 * Not enough free descriptors to transmit this
   9627 			 * packet.  We haven't committed anything yet,
   9628 			 * so just unload the DMA map, put the packet
   9629 			 * pack on the queue, and punt. Notify the upper
    9630 			 * back on the queue, and punt. Notify the upper
   9631 			 */
   9632 			DPRINTF(sc, WM_DEBUG_TX,
   9633 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9634 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9635 				segs_needed, txq->txq_free - 1));
   9636 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9637 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9638 			WM_Q_EVCNT_INCR(txq, txdstall);
   9639 			break;
   9640 		}
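         		/*
         		 * Worked example for the test above (an assumed queue
         		 * state): with txq_free == 8, a packet needing 7
         		 * segments is rejected, since one descriptor is
         		 * reserved for the TDT semantics and one for a
         		 * possible context descriptor, leaving only 6 usable.
         		 */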
   9641 
   9642 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9643 
   9644 		DPRINTF(sc, WM_DEBUG_TX,
   9645 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9646 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9647 
   9648 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9649 
   9650 		/*
   9651 		 * Store a pointer to the packet so that we can free it
   9652 		 * later.
   9653 		 *
   9654 		 * Initially, we consider the number of descriptors the
   9655 		 * packet uses the number of DMA segments.  This may be
   9656 		 * incremented by 1 if we do checksum offload (a descriptor
   9657 		 * is used to set the checksum context).
   9658 		 */
   9659 		txs->txs_mbuf = m0;
   9660 		txs->txs_firstdesc = txq->txq_next;
   9661 		txs->txs_ndesc = segs_needed;
   9662 
   9663 		/* Set up offload parameters for this packet. */
   9664 		uint32_t cmdlen, fields, dcmdlen;
   9665 		if (m0->m_pkthdr.csum_flags &
   9666 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9667 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9668 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9669 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9670 			    &do_csum);
   9671 		} else {
   9672 			do_csum = false;
   9673 			cmdlen = 0;
   9674 			fields = 0;
   9675 		}
   9676 
   9677 		/* Sync the DMA map. */
   9678 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9679 		    BUS_DMASYNC_PREWRITE);
   9680 
   9681 		/* Initialize the first transmit descriptor. */
   9682 		nexttx = txq->txq_next;
   9683 		if (!do_csum) {
   9684 			/* Set up a legacy descriptor */
   9685 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9686 			    dmamap->dm_segs[0].ds_addr);
   9687 			txq->txq_descs[nexttx].wtx_cmdlen =
   9688 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9689 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9690 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9691 			if (vlan_has_tag(m0)) {
   9692 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9693 				    htole32(WTX_CMD_VLE);
   9694 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9695 				    htole16(vlan_get_tag(m0));
   9696 			} else
   9697 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9698 
   9699 			dcmdlen = 0;
   9700 		} else {
   9701 			/* Set up an advanced data descriptor */
   9702 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9703 			    htole64(dmamap->dm_segs[0].ds_addr);
   9704 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9705 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9706 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9707 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9708 			    htole32(fields);
   9709 			DPRINTF(sc, WM_DEBUG_TX,
   9710 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9711 				device_xname(sc->sc_dev), nexttx,
   9712 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9713 			DPRINTF(sc, WM_DEBUG_TX,
   9714 			    ("\t 0x%08x%08x\n", fields,
   9715 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9716 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9717 		}
   9718 
   9719 		lasttx = nexttx;
   9720 		nexttx = WM_NEXTTX(txq, nexttx);
   9721 		/*
   9722 		 * Fill in the next descriptors. Legacy or advanced format
   9723 		 * is the same here.
   9724 		 */
   9725 		for (seg = 1; seg < dmamap->dm_nsegs;
   9726 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9727 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9728 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9729 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9730 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9731 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9732 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9733 			lasttx = nexttx;
   9734 
   9735 			DPRINTF(sc, WM_DEBUG_TX,
   9736 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9737 				device_xname(sc->sc_dev), nexttx,
   9738 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9739 				dmamap->dm_segs[seg].ds_len));
   9740 		}
   9741 
   9742 		KASSERT(lasttx != -1);
   9743 
   9744 		/*
   9745 		 * Set up the command byte on the last descriptor of
   9746 		 * the packet. If we're in the interrupt delay window,
   9747 		 * delay the interrupt.
   9748 		 */
   9749 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9750 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9751 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9752 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9753 
   9754 		txs->txs_lastdesc = lasttx;
   9755 
   9756 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9757 		    device_xname(sc->sc_dev),
   9758 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9759 
   9760 		/* Sync the descriptors we're using. */
   9761 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9762 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9763 
   9764 		/* Give the packet to the chip. */
   9765 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9766 		sent = true;
   9767 
   9768 		DPRINTF(sc, WM_DEBUG_TX,
   9769 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9770 
   9771 		DPRINTF(sc, WM_DEBUG_TX,
   9772 		    ("%s: TX: finished transmitting packet, job %d\n",
   9773 			device_xname(sc->sc_dev), txq->txq_snext));
   9774 
   9775 		/* Advance the tx pointer. */
   9776 		txq->txq_free -= txs->txs_ndesc;
   9777 		txq->txq_next = nexttx;
   9778 
   9779 		txq->txq_sfree--;
   9780 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9781 
   9782 		/* Pass the packet to any BPF listeners. */
   9783 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9784 	}
   9785 
   9786 	if (m0 != NULL) {
   9787 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9788 		WM_Q_EVCNT_INCR(txq, descdrop);
   9789 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9790 			__func__));
   9791 		m_freem(m0);
   9792 	}
   9793 
   9794 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9795 		/* No more slots; notify upper layer. */
   9796 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9797 	}
   9798 
   9799 	if (sent) {
   9800 		/* Set a watchdog timer in case the chip flakes out. */
   9801 		txq->txq_lastsent = time_uptime;
   9802 		txq->txq_sending = true;
   9803 	}
   9804 }
   9805 
   9806 static void
   9807 wm_deferred_start_locked(struct wm_txqueue *txq)
   9808 {
   9809 	struct wm_softc *sc = txq->txq_sc;
   9810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9811 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9812 	int qid = wmq->wmq_id;
   9813 
   9814 	KASSERT(mutex_owned(txq->txq_lock));
   9815 	KASSERT(!txq->txq_stopping);
   9816 
   9817 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9818 		/* XXX needed for ALTQ or single-CPU systems */
   9819 		if (qid == 0)
   9820 			wm_nq_start_locked(ifp);
   9821 		wm_nq_transmit_locked(ifp, txq);
   9822 	} else {
    9823 		/* XXX needed for ALTQ or single-CPU systems */
   9824 		if (qid == 0)
   9825 			wm_start_locked(ifp);
   9826 		wm_transmit_locked(ifp, txq);
   9827 	}
   9828 }
   9829 
   9830 /* Interrupt */
   9831 
   9832 /*
   9833  * wm_txeof:
   9834  *
   9835  *	Helper; handle transmit interrupts.
   9836  */
   9837 static bool
   9838 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9839 {
   9840 	struct wm_softc *sc = txq->txq_sc;
   9841 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9842 	struct wm_txsoft *txs;
   9843 	int count = 0;
   9844 	int i;
   9845 	uint8_t status;
   9846 	bool more = false;
   9847 
   9848 	KASSERT(mutex_owned(txq->txq_lock));
   9849 
   9850 	if (txq->txq_stopping)
   9851 		return false;
   9852 
   9853 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9854 
   9855 	/*
   9856 	 * Go through the Tx list and free mbufs for those
   9857 	 * frames which have been transmitted.
   9858 	 */
   9859 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9860 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9861 		txs = &txq->txq_soft[i];
   9862 
   9863 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9864 			device_xname(sc->sc_dev), i));
   9865 
   9866 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9867 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9868 
   9869 		status =
   9870 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9871 		if ((status & WTX_ST_DD) == 0) {
   9872 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9873 			    BUS_DMASYNC_PREREAD);
   9874 			break;
   9875 		}
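         		/*
         		 * Note: the PREREAD sync above hands the descriptor
         		 * back to the device domain after our peek, so the
         		 * next wm_txeof() pass re-reads a fresh status byte
         		 * once the chip writes DD back.
         		 */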
   9876 
   9877 		if (limit-- == 0) {
   9878 			more = true;
   9879 			DPRINTF(sc, WM_DEBUG_TX,
   9880 			    ("%s: TX: loop limited, job %d is not processed\n",
   9881 				device_xname(sc->sc_dev), i));
   9882 			break;
   9883 		}
   9884 
   9885 		count++;
   9886 		DPRINTF(sc, WM_DEBUG_TX,
   9887 		    ("%s: TX: job %d done: descs %d..%d\n",
   9888 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9889 		    txs->txs_lastdesc));
   9890 
   9891 #ifdef WM_EVENT_COUNTERS
   9892 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9893 			WM_Q_EVCNT_INCR(txq, underrun);
   9894 #endif /* WM_EVENT_COUNTERS */
   9895 
    9896 		/*
    9897 		 * The documents for 82574 and newer say the status field has
    9898 		 * neither the EC (Excessive Collision) bit nor the LC (Late
    9899 		 * Collision) bit; both are reserved. See the "PCIe GbE Controller
    9900 		 * Open Source Software Developer's Manual", the 82574 datasheet,
    9901 		 * and newer.
    9902 		 *
    9903 		 * XXX The LC bit was seen set on I218 even on full-duplex media,
    9904 		 * so the bit might have some other meaning (no document describes it).
    9905 		 */
   9906 
   9907 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9908 		    && ((sc->sc_type < WM_T_82574)
   9909 			|| (sc->sc_type == WM_T_80003))) {
   9910 			if_statinc(ifp, if_oerrors);
   9911 			if (status & WTX_ST_LC)
   9912 				log(LOG_WARNING, "%s: late collision\n",
   9913 				    device_xname(sc->sc_dev));
   9914 			else if (status & WTX_ST_EC) {
   9915 				if_statadd(ifp, if_collisions,
   9916 				    TX_COLLISION_THRESHOLD + 1);
   9917 				log(LOG_WARNING, "%s: excessive collisions\n",
   9918 				    device_xname(sc->sc_dev));
   9919 			}
   9920 		} else
   9921 			if_statinc(ifp, if_opackets);
   9922 
   9923 		txq->txq_packets++;
   9924 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9925 
   9926 		txq->txq_free += txs->txs_ndesc;
   9927 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9928 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9929 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9930 		m_freem(txs->txs_mbuf);
   9931 		txs->txs_mbuf = NULL;
   9932 	}
   9933 
   9934 	/* Update the dirty transmit buffer pointer. */
   9935 	txq->txq_sdirty = i;
   9936 	DPRINTF(sc, WM_DEBUG_TX,
   9937 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9938 
   9939 	if (count != 0)
   9940 		rnd_add_uint32(&sc->rnd_source, count);
   9941 
   9942 	/*
   9943 	 * If there are no more pending transmissions, cancel the watchdog
   9944 	 * timer.
   9945 	 */
   9946 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9947 		txq->txq_sending = false;
   9948 
   9949 	return more;
   9950 }
   9951 
   9952 static inline uint32_t
   9953 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9954 {
   9955 	struct wm_softc *sc = rxq->rxq_sc;
   9956 
   9957 	if (sc->sc_type == WM_T_82574)
   9958 		return EXTRXC_STATUS(
   9959 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9960 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9961 		return NQRXC_STATUS(
   9962 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9963 	else
   9964 		return rxq->rxq_descs[idx].wrx_status;
   9965 }
   9966 
   9967 static inline uint32_t
   9968 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9969 {
   9970 	struct wm_softc *sc = rxq->rxq_sc;
   9971 
   9972 	if (sc->sc_type == WM_T_82574)
   9973 		return EXTRXC_ERROR(
   9974 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9975 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9976 		return NQRXC_ERROR(
   9977 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9978 	else
   9979 		return rxq->rxq_descs[idx].wrx_errors;
   9980 }
   9981 
   9982 static inline uint16_t
   9983 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9984 {
   9985 	struct wm_softc *sc = rxq->rxq_sc;
   9986 
   9987 	if (sc->sc_type == WM_T_82574)
   9988 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9989 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9990 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9991 	else
   9992 		return rxq->rxq_descs[idx].wrx_special;
   9993 }
   9994 
   9995 static inline int
   9996 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9997 {
   9998 	struct wm_softc *sc = rxq->rxq_sc;
   9999 
   10000 	if (sc->sc_type == WM_T_82574)
   10001 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   10002 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10003 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   10004 	else
   10005 		return rxq->rxq_descs[idx].wrx_len;
   10006 }
   10007 
   10008 #ifdef WM_DEBUG
   10009 static inline uint32_t
   10010 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   10011 {
   10012 	struct wm_softc *sc = rxq->rxq_sc;
   10013 
   10014 	if (sc->sc_type == WM_T_82574)
   10015 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   10016 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10017 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   10018 	else
   10019 		return 0;
   10020 }
   10021 
   10022 static inline uint8_t
   10023 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   10024 {
   10025 	struct wm_softc *sc = rxq->rxq_sc;
   10026 
   10027 	if (sc->sc_type == WM_T_82574)
   10028 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   10029 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10030 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   10031 	else
   10032 		return 0;
   10033 }
   10034 #endif /* WM_DEBUG */
   10035 
   10036 static inline bool
   10037 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   10038     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10039 {
   10040 
   10041 	if (sc->sc_type == WM_T_82574)
   10042 		return (status & ext_bit) != 0;
   10043 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10044 		return (status & nq_bit) != 0;
   10045 	else
   10046 		return (status & legacy_bit) != 0;
   10047 }
   10048 
   10049 static inline bool
   10050 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   10051     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10052 {
   10053 
   10054 	if (sc->sc_type == WM_T_82574)
   10055 		return (error & ext_bit) != 0;
   10056 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10057 		return (error & nq_bit) != 0;
   10058 	else
   10059 		return (error & legacy_bit) != 0;
   10060 }
   10061 
   10062 static inline bool
   10063 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   10064 {
   10065 
   10066 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10067 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   10068 		return true;
   10069 	else
   10070 		return false;
   10071 }
   10072 
   10073 static inline bool
   10074 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   10075 {
   10076 	struct wm_softc *sc = rxq->rxq_sc;
   10077 
   10078 	/* XXX missing error bit for newqueue? */
   10079 	if (wm_rxdesc_is_set_error(sc, errors,
   10080 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   10081 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   10082 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   10083 		NQRXC_ERROR_RXE)) {
   10084 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10085 		    EXTRXC_ERROR_SE, 0))
   10086 			log(LOG_WARNING, "%s: symbol error\n",
   10087 			    device_xname(sc->sc_dev));
   10088 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10089 		    EXTRXC_ERROR_SEQ, 0))
   10090 			log(LOG_WARNING, "%s: receive sequence error\n",
   10091 			    device_xname(sc->sc_dev));
   10092 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10093 		    EXTRXC_ERROR_CE, 0))
   10094 			log(LOG_WARNING, "%s: CRC error\n",
   10095 			    device_xname(sc->sc_dev));
   10096 		return true;
   10097 	}
   10098 
   10099 	return false;
   10100 }
   10101 
   10102 static inline bool
   10103 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10104 {
   10105 	struct wm_softc *sc = rxq->rxq_sc;
   10106 
   10107 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10108 		NQRXC_STATUS_DD)) {
   10109 		/* We have processed all of the receive descriptors. */
   10110 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10111 		return false;
   10112 	}
   10113 
   10114 	return true;
   10115 }
   10116 
   10117 static inline bool
   10118 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10119     uint16_t vlantag, struct mbuf *m)
   10120 {
   10121 
   10122 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10123 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10124 		vlan_set_tag(m, le16toh(vlantag));
   10125 	}
   10126 
   10127 	return true;
   10128 }
   10129 
   10130 static inline void
   10131 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10132     uint32_t errors, struct mbuf *m)
   10133 {
   10134 	struct wm_softc *sc = rxq->rxq_sc;
   10135 
   10136 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10137 		if (wm_rxdesc_is_set_status(sc, status,
   10138 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10139 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10140 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10141 			if (wm_rxdesc_is_set_error(sc, errors,
   10142 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10143 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10144 		}
   10145 		if (wm_rxdesc_is_set_status(sc, status,
   10146 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10147 			/*
   10148 			 * Note: we don't know if this was TCP or UDP,
   10149 			 * so we just set both bits, and expect the
   10150 			 * upper layers to deal.
   10151 			 */
   10152 			WM_Q_EVCNT_INCR(rxq, tusum);
   10153 			m->m_pkthdr.csum_flags |=
   10154 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10155 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10156 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10157 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10158 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10159 		}
   10160 	}
   10161 }
   10162 
   10163 /*
   10164  * wm_rxeof:
   10165  *
   10166  *	Helper; handle receive interrupts.
   10167  */
   10168 static bool
   10169 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10170 {
   10171 	struct wm_softc *sc = rxq->rxq_sc;
   10172 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10173 	struct wm_rxsoft *rxs;
   10174 	struct mbuf *m;
   10175 	int i, len;
   10176 	int count = 0;
   10177 	uint32_t status, errors;
   10178 	uint16_t vlantag;
   10179 	bool more = false;
   10180 
   10181 	KASSERT(mutex_owned(rxq->rxq_lock));
   10182 
   10183 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10184 		rxs = &rxq->rxq_soft[i];
   10185 
   10186 		DPRINTF(sc, WM_DEBUG_RX,
   10187 		    ("%s: RX: checking descriptor %d\n",
   10188 			device_xname(sc->sc_dev), i));
   10189 		wm_cdrxsync(rxq, i,
   10190 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10191 
   10192 		status = wm_rxdesc_get_status(rxq, i);
   10193 		errors = wm_rxdesc_get_errors(rxq, i);
   10194 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10195 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10196 #ifdef WM_DEBUG
   10197 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10198 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10199 #endif
   10200 
   10201 		if (!wm_rxdesc_dd(rxq, i, status))
   10202 			break;
   10203 
   10204 		if (limit-- == 0) {
   10205 			more = true;
   10206 			DPRINTF(sc, WM_DEBUG_RX,
   10207 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10208 				device_xname(sc->sc_dev), i));
   10209 			break;
   10210 		}
   10211 
   10212 		count++;
   10213 		if (__predict_false(rxq->rxq_discard)) {
   10214 			DPRINTF(sc, WM_DEBUG_RX,
   10215 			    ("%s: RX: discarding contents of descriptor %d\n",
   10216 				device_xname(sc->sc_dev), i));
   10217 			wm_init_rxdesc(rxq, i);
   10218 			if (wm_rxdesc_is_eop(rxq, status)) {
   10219 				/* Reset our state. */
   10220 				DPRINTF(sc, WM_DEBUG_RX,
   10221 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10222 					device_xname(sc->sc_dev)));
   10223 				rxq->rxq_discard = 0;
   10224 			}
   10225 			continue;
   10226 		}
   10227 
   10228 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10229 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10230 
   10231 		m = rxs->rxs_mbuf;
   10232 
   10233 		/*
   10234 		 * Add a new receive buffer to the ring, unless of
   10235 		 * course the length is zero. Treat the latter as a
   10236 		 * failed mapping.
   10237 		 */
   10238 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10239 			/*
   10240 			 * Failed, throw away what we've done so
   10241 			 * far, and discard the rest of the packet.
   10242 			 */
   10243 			if_statinc(ifp, if_ierrors);
   10244 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10245 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10246 			wm_init_rxdesc(rxq, i);
   10247 			if (!wm_rxdesc_is_eop(rxq, status))
   10248 				rxq->rxq_discard = 1;
   10249 			if (rxq->rxq_head != NULL)
   10250 				m_freem(rxq->rxq_head);
   10251 			WM_RXCHAIN_RESET(rxq);
   10252 			DPRINTF(sc, WM_DEBUG_RX,
   10253 			    ("%s: RX: Rx buffer allocation failed, "
   10254 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10255 				rxq->rxq_discard ? " (discard)" : ""));
   10256 			continue;
   10257 		}
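          		/*
          		 * Example of the discard handling above: if the buffer
          		 * allocation fails on the first descriptor of a
          		 * three-descriptor frame, rxq_discard stays set while
          		 * the next two descriptors are recycled via
          		 * wm_init_rxdesc(), and is cleared again once the EOP
          		 * descriptor is seen.
          		 */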
   10258 
   10259 		m->m_len = len;
   10260 		rxq->rxq_len += len;
   10261 		DPRINTF(sc, WM_DEBUG_RX,
   10262 		    ("%s: RX: buffer at %p len %d\n",
   10263 			device_xname(sc->sc_dev), m->m_data, len));
   10264 
   10265 		/* If this is not the end of the packet, keep looking. */
   10266 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10267 			WM_RXCHAIN_LINK(rxq, m);
   10268 			DPRINTF(sc, WM_DEBUG_RX,
   10269 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10270 				device_xname(sc->sc_dev), rxq->rxq_len));
   10271 			continue;
   10272 		}
   10273 
    10274 		/*
    10275 		 * Okay, we have the entire packet now. The chip is configured
    10276 		 * to include the FCS (not all chips can be configured to strip
    10277 		 * it), so we need to trim it, except on I35[04] and I21[01]:
    10278 		 * those chips have an erratum that the RCTL_SECRC bit in the
    10279 		 * RCTL register is always set, so we don't trim it there.
    10280 		 * PCH2 and newer chips also do not include the FCS when jumbo
    10281 		 * frames are used, to work around an errata.
    10282 		 * We may need to adjust the length of the previous mbuf in
    10283 		 * the chain if the current mbuf is too short.
    10284 		 */
   10285 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10286 			if (m->m_len < ETHER_CRC_LEN) {
   10287 				rxq->rxq_tail->m_len
   10288 				    -= (ETHER_CRC_LEN - m->m_len);
   10289 				m->m_len = 0;
   10290 			} else
   10291 				m->m_len -= ETHER_CRC_LEN;
   10292 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10293 		} else
   10294 			len = rxq->rxq_len;
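          		/*
          		 * Worked example for the trim above: with the 4-byte
          		 * FCS split so that the final mbuf holds only 2 of its
          		 * bytes, the final mbuf is zeroed and the previous
          		 * mbuf in the chain is shortened by the remaining
          		 * 2 bytes.
          		 */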
   10295 
   10296 		WM_RXCHAIN_LINK(rxq, m);
   10297 
   10298 		*rxq->rxq_tailp = NULL;
   10299 		m = rxq->rxq_head;
   10300 
   10301 		WM_RXCHAIN_RESET(rxq);
   10302 
   10303 		DPRINTF(sc, WM_DEBUG_RX,
   10304 		    ("%s: RX: have entire packet, len -> %d\n",
   10305 			device_xname(sc->sc_dev), len));
   10306 
   10307 		/* If an error occurred, update stats and drop the packet. */
   10308 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10309 			m_freem(m);
   10310 			continue;
   10311 		}
   10312 
   10313 		/* No errors.  Receive the packet. */
   10314 		m_set_rcvif(m, ifp);
   10315 		m->m_pkthdr.len = len;
    10316 		/*
    10317 		 * TODO
    10318 		 * The rsshash and rsstype should be saved in this mbuf.
    10319 		 */
   10320 		DPRINTF(sc, WM_DEBUG_RX,
   10321 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10322 			device_xname(sc->sc_dev), rsstype, rsshash));
   10323 
   10324 		/*
   10325 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10326 		 * for us.  Associate the tag with the packet.
   10327 		 */
   10328 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10329 			continue;
   10330 
   10331 		/* Set up checksum info for this packet. */
   10332 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10333 
   10334 		rxq->rxq_packets++;
   10335 		rxq->rxq_bytes += len;
   10336 		/* Pass it on. */
   10337 		if_percpuq_enqueue(sc->sc_ipq, m);
   10338 
   10339 		if (rxq->rxq_stopping)
   10340 			break;
   10341 	}
   10342 	rxq->rxq_ptr = i;
   10343 
   10344 	if (count != 0)
   10345 		rnd_add_uint32(&sc->rnd_source, count);
   10346 
   10347 	DPRINTF(sc, WM_DEBUG_RX,
   10348 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10349 
   10350 	return more;
   10351 }
   10352 
   10353 /*
   10354  * wm_linkintr_gmii:
   10355  *
   10356  *	Helper; handle link interrupts for GMII.
   10357  */
   10358 static void
   10359 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10360 {
   10361 	device_t dev = sc->sc_dev;
   10362 	uint32_t status, reg;
   10363 	bool link;
   10364 	bool dopoll = true;
   10365 	int rv;
   10366 
   10367 	KASSERT(mutex_owned(sc->sc_core_lock));
   10368 
   10369 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10370 		__func__));
   10371 
   10372 	if ((icr & ICR_LSC) == 0) {
   10373 		if (icr & ICR_RXSEQ)
   10374 			DPRINTF(sc, WM_DEBUG_LINK,
   10375 			    ("%s: LINK Receive sequence error\n",
   10376 				device_xname(dev)));
   10377 		return;
   10378 	}
   10379 
   10380 	/* Link status changed */
   10381 	status = CSR_READ(sc, WMREG_STATUS);
   10382 	link = status & STATUS_LU;
   10383 	if (link) {
   10384 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10385 			device_xname(dev),
   10386 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10387 		if (wm_phy_need_linkdown_discard(sc)) {
   10388 			DPRINTF(sc, WM_DEBUG_LINK,
   10389 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10390 				device_xname(dev)));
   10391 			wm_clear_linkdown_discard(sc);
   10392 		}
   10393 	} else {
   10394 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10395 			device_xname(dev)));
   10396 		if (wm_phy_need_linkdown_discard(sc)) {
   10397 			DPRINTF(sc, WM_DEBUG_LINK,
   10398 			    ("%s: linkintr: Set linkdown discard flag\n",
   10399 				device_xname(dev)));
   10400 			wm_set_linkdown_discard(sc);
   10401 		}
   10402 	}
   10403 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10404 		wm_gig_downshift_workaround_ich8lan(sc);
   10405 
   10406 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10407 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10408 
   10409 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10410 		device_xname(dev)));
   10411 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   10412 		if (link) {
    10413 			/*
    10414 			 * To work around the problem, it's required to wait
    10415 			 * several hundred milliseconds. The time depends
    10416 			 * on the environment. Wait 1 second to be safe.
    10417 			 */
   10418 			dopoll = false;
   10419 			getmicrotime(&sc->sc_linkup_delay_time);
   10420 			sc->sc_linkup_delay_time.tv_sec += 1;
   10421 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
    10422 			/*
    10423 			 * Checking tv_sec only is enough; it keeps this
    10424 			 * simple.
    10425 			 *
    10426 			 * Clearing the time is not currently required;
    10427 			 * it just records that the timer is stopped
    10428 			 * (for debugging).
    10429 			 */
   10429 
   10430 			sc->sc_linkup_delay_time.tv_sec = 0;
   10431 			sc->sc_linkup_delay_time.tv_usec = 0;
   10432 		}
   10433 	}
   10434 
    10435 	/*
    10436 	 * Call mii_pollstat().
    10437 	 *
    10438 	 * Some (not all) systems using I35[04] or I21[01] don't send packets
    10439 	 * soon after link-up. The MAC sends a packet to the PHY and no error
    10440 	 * is observed. This causes a problem: gratuitous ARP and/or IPv6 DAD
    10441 	 * packets are silently dropped. To avoid this, don't call
    10442 	 * mii_pollstat() here, which would send a LINK_STATE_UP notification
    10443 	 * to the upper layer. Instead, mii_pollstat() will be called in
    10444 	 * wm_gmii_mediastatus(), or mii_tick() will be called in wm_tick().
    10445 	 */
   10446 	if (dopoll)
   10447 		mii_pollstat(&sc->sc_mii);
   10448 
   10449 	/* Do some workarounds soon after link status is changed. */
   10450 
   10451 	if (sc->sc_type == WM_T_82543) {
   10452 		int miistatus, active;
   10453 
   10454 		/*
   10455 		 * With 82543, we need to force speed and
   10456 		 * duplex on the MAC equal to what the PHY
   10457 		 * speed and duplex configuration is.
   10458 		 */
   10459 		miistatus = sc->sc_mii.mii_media_status;
   10460 
   10461 		if (miistatus & IFM_ACTIVE) {
   10462 			active = sc->sc_mii.mii_media_active;
   10463 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10464 			switch (IFM_SUBTYPE(active)) {
   10465 			case IFM_10_T:
   10466 				sc->sc_ctrl |= CTRL_SPEED_10;
   10467 				break;
   10468 			case IFM_100_TX:
   10469 				sc->sc_ctrl |= CTRL_SPEED_100;
   10470 				break;
   10471 			case IFM_1000_T:
   10472 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10473 				break;
   10474 			default:
   10475 				/*
   10476 				 * Fiber?
    10477 				 * Should not enter here.
   10478 				 */
   10479 				device_printf(dev, "unknown media (%x)\n",
   10480 				    active);
   10481 				break;
   10482 			}
   10483 			if (active & IFM_FDX)
   10484 				sc->sc_ctrl |= CTRL_FD;
   10485 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10486 		}
   10487 	} else if (sc->sc_type == WM_T_PCH) {
   10488 		wm_k1_gig_workaround_hv(sc,
   10489 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10490 	}
   10491 
   10492 	/*
   10493 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10494 	 * aggressive resulting in many collisions. To avoid this, increase
   10495 	 * the IPG and reduce Rx latency in the PHY.
   10496 	 */
   10497 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
   10498 	    && link) {
   10499 		uint32_t tipg_reg;
   10500 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10501 		bool fdx;
   10502 		uint16_t emi_addr, emi_val;
   10503 
   10504 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10505 		tipg_reg &= ~TIPG_IPGT_MASK;
   10506 		fdx = status & STATUS_FD;
   10507 
   10508 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10509 			tipg_reg |= 0xff;
   10510 			/* Reduce Rx latency in analog PHY */
   10511 			emi_val = 0;
   10512 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10513 		    fdx && speed != STATUS_SPEED_1000) {
   10514 			tipg_reg |= 0xc;
   10515 			emi_val = 1;
   10516 		} else {
   10517 			/* Roll back the default values */
   10518 			tipg_reg |= 0x08;
   10519 			emi_val = 1;
   10520 		}
   10521 
   10522 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10523 
   10524 		rv = sc->phy.acquire(sc);
   10525 		if (rv)
   10526 			return;
   10527 
   10528 		if (sc->sc_type == WM_T_PCH2)
   10529 			emi_addr = I82579_RX_CONFIG;
   10530 		else
   10531 			emi_addr = I217_RX_CONFIG;
   10532 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10533 
   10534 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10535 			uint16_t phy_reg;
   10536 
   10537 			sc->phy.readreg_locked(dev, 2,
   10538 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10539 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10540 			if (speed == STATUS_SPEED_100
   10541 			    || speed == STATUS_SPEED_10)
   10542 				phy_reg |= 0x3e8;
   10543 			else
   10544 				phy_reg |= 0xfa;
   10545 			sc->phy.writereg_locked(dev, 2,
   10546 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10547 
   10548 			if (speed == STATUS_SPEED_1000) {
   10549 				sc->phy.readreg_locked(dev, 2,
   10550 				    HV_PM_CTRL, &phy_reg);
   10551 
   10552 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10553 
   10554 				sc->phy.writereg_locked(dev, 2,
   10555 				    HV_PM_CTRL, phy_reg);
   10556 			}
   10557 		}
   10558 		sc->phy.release(sc);
   10559 
   10560 		if (rv)
   10561 			return;
   10562 
   10563 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10564 			uint16_t data, ptr_gap;
   10565 
   10566 			if (speed == STATUS_SPEED_1000) {
   10567 				rv = sc->phy.acquire(sc);
   10568 				if (rv)
   10569 					return;
   10570 
   10571 				rv = sc->phy.readreg_locked(dev, 2,
   10572 				    I82579_UNKNOWN1, &data);
   10573 				if (rv) {
   10574 					sc->phy.release(sc);
   10575 					return;
   10576 				}
   10577 
   10578 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10579 				if (ptr_gap < 0x18) {
   10580 					data &= ~(0x3ff << 2);
   10581 					data |= (0x18 << 2);
   10582 					rv = sc->phy.writereg_locked(dev,
   10583 					    2, I82579_UNKNOWN1, data);
   10584 				}
   10585 				sc->phy.release(sc);
   10586 				if (rv)
   10587 					return;
   10588 			} else {
   10589 				rv = sc->phy.acquire(sc);
   10590 				if (rv)
   10591 					return;
   10592 
   10593 				rv = sc->phy.writereg_locked(dev, 2,
   10594 				    I82579_UNKNOWN1, 0xc023);
   10595 				sc->phy.release(sc);
   10596 				if (rv)
   10597 					return;
   10598 
   10599 			}
   10600 		}
   10601 	}
   10602 
    10603 	/*
    10604 	 * I217 packet loss issue:
    10605 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    10606 	 * on power up.
    10607 	 * Set the Beacon Duration for I217 to 8 usec.
    10608 	 */
   10609 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10610 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10611 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10612 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10613 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10614 	}
   10615 
   10616 	/* Work-around I218 hang issue */
   10617 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10618 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10619 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10620 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10621 		wm_k1_workaround_lpt_lp(sc, link);
   10622 
   10623 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10624 		/*
   10625 		 * Set platform power management values for Latency
   10626 		 * Tolerance Reporting (LTR)
   10627 		 */
   10628 		wm_platform_pm_pch_lpt(sc,
   10629 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10630 	}
   10631 
   10632 	/* Clear link partner's EEE ability */
   10633 	sc->eee_lp_ability = 0;
   10634 
   10635 	/* FEXTNVM6 K1-off workaround */
   10636 	if (sc->sc_type == WM_T_PCH_SPT) {
   10637 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10638 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10639 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10640 		else
   10641 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10642 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10643 	}
   10644 
   10645 	if (!link)
   10646 		return;
   10647 
   10648 	switch (sc->sc_type) {
   10649 	case WM_T_PCH2:
   10650 		wm_k1_workaround_lv(sc);
   10651 		/* FALLTHROUGH */
   10652 	case WM_T_PCH:
   10653 		if (sc->sc_phytype == WMPHY_82578)
   10654 			wm_link_stall_workaround_hv(sc);
   10655 		break;
   10656 	default:
   10657 		break;
   10658 	}
   10659 
   10660 	/* Enable/Disable EEE after link up */
   10661 	if (sc->sc_phytype > WMPHY_82579)
   10662 		wm_set_eee_pchlan(sc);
   10663 }
   10664 
   10665 /*
   10666  * wm_linkintr_tbi:
   10667  *
   10668  *	Helper; handle link interrupts for TBI mode.
   10669  */
   10670 static void
   10671 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10672 {
   10673 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10674 	uint32_t status;
   10675 
   10676 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10677 		__func__));
   10678 
   10679 	status = CSR_READ(sc, WMREG_STATUS);
   10680 	if (icr & ICR_LSC) {
   10681 		wm_check_for_link(sc);
   10682 		if (status & STATUS_LU) {
   10683 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10684 				device_xname(sc->sc_dev),
   10685 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10686 			/*
   10687 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10688 			 * so we should update sc->sc_ctrl
   10689 			 */
   10690 
   10691 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10692 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10693 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10694 			if (status & STATUS_FD)
   10695 				sc->sc_tctl |=
   10696 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10697 			else
   10698 				sc->sc_tctl |=
   10699 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10700 			if (sc->sc_ctrl & CTRL_TFCE)
   10701 				sc->sc_fcrtl |= FCRTL_XONE;
   10702 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10703 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10704 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10705 			sc->sc_tbi_linkup = 1;
   10706 			if_link_state_change(ifp, LINK_STATE_UP);
   10707 		} else {
   10708 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10709 				device_xname(sc->sc_dev)));
   10710 			sc->sc_tbi_linkup = 0;
   10711 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10712 		}
   10713 		/* Update LED */
   10714 		wm_tbi_serdes_set_linkled(sc);
   10715 	} else if (icr & ICR_RXSEQ)
   10716 		DPRINTF(sc, WM_DEBUG_LINK,
   10717 		    ("%s: LINK: Receive sequence error\n",
   10718 			device_xname(sc->sc_dev)));
   10719 }
   10720 
   10721 /*
   10722  * wm_linkintr_serdes:
   10723  *
    10724  *	Helper; handle link interrupts for SERDES mode.
   10725  */
   10726 static void
   10727 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10728 {
   10729 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10730 	struct mii_data *mii = &sc->sc_mii;
   10731 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10732 	uint32_t pcs_adv, pcs_lpab, reg;
   10733 
   10734 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10735 		__func__));
   10736 
   10737 	if (icr & ICR_LSC) {
   10738 		/* Check PCS */
   10739 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10740 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10741 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10742 				device_xname(sc->sc_dev)));
   10743 			mii->mii_media_status |= IFM_ACTIVE;
   10744 			sc->sc_tbi_linkup = 1;
   10745 			if_link_state_change(ifp, LINK_STATE_UP);
   10746 		} else {
   10747 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10748 				device_xname(sc->sc_dev)));
   10749 			mii->mii_media_status |= IFM_NONE;
   10750 			sc->sc_tbi_linkup = 0;
   10751 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10752 			wm_tbi_serdes_set_linkled(sc);
   10753 			return;
   10754 		}
   10755 		mii->mii_media_active |= IFM_1000_SX;
   10756 		if ((reg & PCS_LSTS_FDX) != 0)
   10757 			mii->mii_media_active |= IFM_FDX;
   10758 		else
   10759 			mii->mii_media_active |= IFM_HDX;
   10760 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10761 			/* Check flow */
   10762 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10763 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10764 				DPRINTF(sc, WM_DEBUG_LINK,
   10765 				    ("XXX LINKOK but not ACOMP\n"));
   10766 				return;
   10767 			}
   10768 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10769 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10770 			DPRINTF(sc, WM_DEBUG_LINK,
   10771 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
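          			/*
          			 * Resolve flow control per IEEE 802.3 annex 28B:
          			 * symmetric PAUSE on both sides enables Tx and Rx
          			 * pause; local asym-only with remote sym+asym
          			 * enables Tx pause only; local sym+asym with
          			 * remote asym-only enables Rx pause only.
          			 */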
   10772 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10773 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10774 				mii->mii_media_active |= IFM_FLOW
   10775 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10776 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10777 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10778 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10779 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10780 				mii->mii_media_active |= IFM_FLOW
   10781 				    | IFM_ETH_TXPAUSE;
   10782 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10783 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10784 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10785 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10786 				mii->mii_media_active |= IFM_FLOW
   10787 				    | IFM_ETH_RXPAUSE;
   10788 		}
   10789 		/* Update LED */
   10790 		wm_tbi_serdes_set_linkled(sc);
   10791 	} else
   10792 		DPRINTF(sc, WM_DEBUG_LINK,
   10793 		    ("%s: LINK: Receive sequence error\n",
   10794 		    device_xname(sc->sc_dev)));
   10795 }
   10796 
   10797 /*
   10798  * wm_linkintr:
   10799  *
   10800  *	Helper; handle link interrupts.
   10801  */
   10802 static void
   10803 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10804 {
   10805 
   10806 	KASSERT(mutex_owned(sc->sc_core_lock));
   10807 
   10808 	if (sc->sc_flags & WM_F_HAS_MII)
   10809 		wm_linkintr_gmii(sc, icr);
   10810 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10811 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10812 		wm_linkintr_serdes(sc, icr);
   10813 	else
   10814 		wm_linkintr_tbi(sc, icr);
   10815 }
   10816 
   10817 
   10818 static inline void
   10819 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10820 {
   10821 
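          	/*
          	 * The wmq_wq_enqueued flag keeps the same struct work from
          	 * being enqueued twice; workqueue(9) requires that a work
          	 * item not be re-enqueued until its handler has run.
          	 */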
   10822 	if (wmq->wmq_txrx_use_workqueue) {
   10823 		if (!wmq->wmq_wq_enqueued) {
   10824 			wmq->wmq_wq_enqueued = true;
   10825 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10826 			    curcpu());
   10827 		}
   10828 	} else
   10829 		softint_schedule(wmq->wmq_si);
   10830 }
   10831 
   10832 static inline void
   10833 wm_legacy_intr_disable(struct wm_softc *sc)
   10834 {
   10835 
   10836 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10837 }
   10838 
   10839 static inline void
   10840 wm_legacy_intr_enable(struct wm_softc *sc)
   10841 {
   10842 
   10843 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10844 }
   10845 
   10846 /*
   10847  * wm_intr_legacy:
   10848  *
   10849  *	Interrupt service routine for INTx and MSI.
   10850  */
   10851 static int
   10852 wm_intr_legacy(void *arg)
   10853 {
   10854 	struct wm_softc *sc = arg;
   10855 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10856 	struct wm_queue *wmq = &sc->sc_queue[0];
   10857 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10858 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10859 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10860 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10861 	uint32_t icr, rndval = 0;
   10862 	bool more = false;
   10863 
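          	/* Reading ICR acknowledges and clears the asserted causes. */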
   10864 	icr = CSR_READ(sc, WMREG_ICR);
   10865 	if ((icr & sc->sc_icr) == 0)
   10866 		return 0;
   10867 
   10868 	DPRINTF(sc, WM_DEBUG_TX,
    10869 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
    10870 	rndval = icr;
   10872 
   10873 	mutex_enter(txq->txq_lock);
   10874 
   10875 	if (txq->txq_stopping) {
   10876 		mutex_exit(txq->txq_lock);
   10877 		return 1;
   10878 	}
   10879 
   10880 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10881 	if (icr & ICR_TXDW) {
   10882 		DPRINTF(sc, WM_DEBUG_TX,
   10883 		    ("%s: TX: got TXDW interrupt\n",
   10884 			device_xname(sc->sc_dev)));
   10885 		WM_Q_EVCNT_INCR(txq, txdw);
   10886 	}
   10887 #endif
   10888 	if (txlimit > 0) {
   10889 		more |= wm_txeof(txq, txlimit);
   10890 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10891 			more = true;
   10892 	} else
   10893 		more = true;
   10894 	mutex_exit(txq->txq_lock);
   10895 
   10896 	mutex_enter(rxq->rxq_lock);
   10897 
   10898 	if (rxq->rxq_stopping) {
   10899 		mutex_exit(rxq->rxq_lock);
   10900 		return 1;
   10901 	}
   10902 
   10903 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10904 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10905 		DPRINTF(sc, WM_DEBUG_RX,
   10906 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10907 			device_xname(sc->sc_dev),
   10908 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10909 		WM_Q_EVCNT_INCR(rxq, intr);
   10910 	}
   10911 #endif
    10912 	if (rxlimit > 0) {
    10913 		/*
    10914 		 * wm_rxeof() does *not* call upper layer functions directly,
    10915 		 * as if_percpuq_enqueue() just calls softint_schedule().
    10916 		 * So, we can call wm_rxeof() in interrupt context.
    10917 		 */
    10918 		more |= wm_rxeof(rxq, rxlimit);
    10919 	} else
    10920 		more = true;
   10921 
   10922 	mutex_exit(rxq->rxq_lock);
   10923 
   10924 	mutex_enter(sc->sc_core_lock);
   10925 
   10926 	if (sc->sc_core_stopping) {
   10927 		mutex_exit(sc->sc_core_lock);
   10928 		return 1;
   10929 	}
   10930 
   10931 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10932 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10933 		wm_linkintr(sc, icr);
   10934 	}
   10935 	if ((icr & ICR_GPI(0)) != 0)
   10936 		device_printf(sc->sc_dev, "got module interrupt\n");
   10937 
   10938 	mutex_exit(sc->sc_core_lock);
   10939 
   10940 	if (icr & ICR_RXO) {
   10941 #if defined(WM_DEBUG)
   10942 		log(LOG_WARNING, "%s: Receive overrun\n",
   10943 		    device_xname(sc->sc_dev));
   10944 #endif /* defined(WM_DEBUG) */
   10945 	}
   10946 
   10947 	rnd_add_uint32(&sc->rnd_source, rndval);
   10948 
   10949 	if (more) {
   10950 		/* Try to get more packets going. */
   10951 		wm_legacy_intr_disable(sc);
   10952 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10953 		wm_sched_handle_queue(sc, wmq);
   10954 	}
   10955 
   10956 	return 1;
   10957 }
   10958 
   10959 static inline void
   10960 wm_txrxintr_disable(struct wm_queue *wmq)
   10961 {
   10962 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10963 
   10964 	if (__predict_false(!wm_is_using_msix(sc))) {
   10965 		wm_legacy_intr_disable(sc);
   10966 		return;
   10967 	}
   10968 
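          	/*
          	 * 82574 masks per-queue causes in IMC, 82575 masks them in
          	 * EIMC via the EITR_{TX,RX}_QUEUE bits, and later MSI-X
          	 * devices mask the per-vector bit in EIMC.
          	 */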
   10969 	if (sc->sc_type == WM_T_82574)
   10970 		CSR_WRITE(sc, WMREG_IMC,
   10971 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10972 	else if (sc->sc_type == WM_T_82575)
   10973 		CSR_WRITE(sc, WMREG_EIMC,
   10974 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10975 	else
   10976 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10977 }
   10978 
   10979 static inline void
   10980 wm_txrxintr_enable(struct wm_queue *wmq)
   10981 {
   10982 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10983 
   10984 	wm_itrs_calculate(sc, wmq);
   10985 
   10986 	if (__predict_false(!wm_is_using_msix(sc))) {
   10987 		wm_legacy_intr_enable(sc);
   10988 		return;
   10989 	}
   10990 
   10991 	/*
    10992 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10993 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
    10994 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    10995 	 * while its wm_handle_queue(wmq) is running.
   10996 	 */
   10997 	if (sc->sc_type == WM_T_82574)
   10998 		CSR_WRITE(sc, WMREG_IMS,
   10999 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   11000 	else if (sc->sc_type == WM_T_82575)
   11001 		CSR_WRITE(sc, WMREG_EIMS,
   11002 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   11003 	else
   11004 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   11005 }
   11006 
   11007 static int
   11008 wm_txrxintr_msix(void *arg)
   11009 {
   11010 	struct wm_queue *wmq = arg;
   11011 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11012 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11013 	struct wm_softc *sc = txq->txq_sc;
   11014 	u_int txlimit = sc->sc_tx_intr_process_limit;
   11015 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   11016 	bool txmore;
   11017 	bool rxmore;
   11018 
   11019 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   11020 
   11021 	DPRINTF(sc, WM_DEBUG_TX,
   11022 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   11023 
   11024 	wm_txrxintr_disable(wmq);
   11025 
   11026 	mutex_enter(txq->txq_lock);
   11027 
   11028 	if (txq->txq_stopping) {
   11029 		mutex_exit(txq->txq_lock);
   11030 		return 1;
   11031 	}
   11032 
   11033 	WM_Q_EVCNT_INCR(txq, txdw);
   11034 	if (txlimit > 0) {
   11035 		txmore = wm_txeof(txq, txlimit);
    11036 		/* wm_deferred_start() is done in wm_handle_queue(). */
   11037 	} else
   11038 		txmore = true;
   11039 	mutex_exit(txq->txq_lock);
   11040 
   11041 	DPRINTF(sc, WM_DEBUG_RX,
   11042 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   11043 	mutex_enter(rxq->rxq_lock);
   11044 
   11045 	if (rxq->rxq_stopping) {
   11046 		mutex_exit(rxq->rxq_lock);
   11047 		return 1;
   11048 	}
   11049 
   11050 	WM_Q_EVCNT_INCR(rxq, intr);
   11051 	if (rxlimit > 0) {
   11052 		rxmore = wm_rxeof(rxq, rxlimit);
   11053 	} else
   11054 		rxmore = true;
   11055 	mutex_exit(rxq->rxq_lock);
   11056 
   11057 	wm_itrs_writereg(sc, wmq);
   11058 
   11059 	if (txmore || rxmore) {
   11060 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11061 		wm_sched_handle_queue(sc, wmq);
   11062 	} else
   11063 		wm_txrxintr_enable(wmq);
   11064 
   11065 	return 1;
   11066 }
   11067 
   11068 static void
   11069 wm_handle_queue(void *arg)
   11070 {
   11071 	struct wm_queue *wmq = arg;
   11072 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11073 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11074 	struct wm_softc *sc = txq->txq_sc;
   11075 	u_int txlimit = sc->sc_tx_process_limit;
   11076 	u_int rxlimit = sc->sc_rx_process_limit;
   11077 	bool txmore;
   11078 	bool rxmore;
   11079 
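          	/*
          	 * The softint/workqueue path uses sc_{tx,rx}_process_limit;
          	 * the interrupt handlers use sc_{tx,rx}_intr_process_limit.
          	 */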
   11080 	mutex_enter(txq->txq_lock);
   11081 	if (txq->txq_stopping) {
   11082 		mutex_exit(txq->txq_lock);
   11083 		return;
   11084 	}
   11085 	txmore = wm_txeof(txq, txlimit);
   11086 	wm_deferred_start_locked(txq);
   11087 	mutex_exit(txq->txq_lock);
   11088 
   11089 	mutex_enter(rxq->rxq_lock);
   11090 	if (rxq->rxq_stopping) {
   11091 		mutex_exit(rxq->rxq_lock);
   11092 		return;
   11093 	}
   11094 	WM_Q_EVCNT_INCR(rxq, defer);
   11095 	rxmore = wm_rxeof(rxq, rxlimit);
   11096 	mutex_exit(rxq->rxq_lock);
   11097 
   11098 	if (txmore || rxmore) {
   11099 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11100 		wm_sched_handle_queue(sc, wmq);
   11101 	} else
   11102 		wm_txrxintr_enable(wmq);
   11103 }
   11104 
   11105 static void
   11106 wm_handle_queue_work(struct work *wk, void *context)
   11107 {
   11108 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   11109 
   11110 	/*
    11111 	 * Workaround for some qemu environments, which don't stop
    11112 	 * delivering interrupts immediately.
   11113 	 */
   11114 	wmq->wmq_wq_enqueued = false;
   11115 	wm_handle_queue(wmq);
   11116 }
   11117 
   11118 /*
   11119  * wm_linkintr_msix:
   11120  *
   11121  *	Interrupt service routine for link status change for MSI-X.
   11122  */
   11123 static int
   11124 wm_linkintr_msix(void *arg)
   11125 {
   11126 	struct wm_softc *sc = arg;
   11127 	uint32_t reg;
    11128 	bool has_rxo = false;
   11129 
   11130 	reg = CSR_READ(sc, WMREG_ICR);
   11131 	mutex_enter(sc->sc_core_lock);
   11132 	DPRINTF(sc, WM_DEBUG_LINK,
   11133 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11134 		device_xname(sc->sc_dev), reg));
   11135 
   11136 	if (sc->sc_core_stopping)
   11137 		goto out;
   11138 
   11139 	if ((reg & ICR_LSC) != 0) {
   11140 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11141 		wm_linkintr(sc, ICR_LSC);
   11142 	}
   11143 	if ((reg & ICR_GPI(0)) != 0)
   11144 		device_printf(sc->sc_dev, "got module interrupt\n");
   11145 
   11146 	/*
   11147 	 * XXX 82574 MSI-X mode workaround
   11148 	 *
    11149 	 * 82574 MSI-X mode delivers a receive overrun (RXO) interrupt on
    11150 	 * the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor
    11151 	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    11152 	 * interrupts by writing WMREG_ICS to process receive packets.
   11153 	 */
   11154 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11155 #if defined(WM_DEBUG)
   11156 		log(LOG_WARNING, "%s: Receive overrun\n",
   11157 		    device_xname(sc->sc_dev));
   11158 #endif /* defined(WM_DEBUG) */
   11159 
   11160 		has_rxo = true;
   11161 		/*
    11162 		 * The RXO interrupt fires at a very high rate when receive
    11163 		 * traffic is heavy, so we use polling mode for ICR_OTHER as
    11164 		 * for the Tx/Rx interrupts. ICR_OTHER is re-enabled at the
    11165 		 * end of wm_txrxintr_msix(), which is kicked by both the
    11166 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   11167 		 */
   11168 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11169 
   11170 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11171 	}
    11172 
   11175 out:
   11176 	mutex_exit(sc->sc_core_lock);
   11177 
   11178 	if (sc->sc_type == WM_T_82574) {
   11179 		if (!has_rxo)
   11180 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11181 		else
   11182 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11183 	} else if (sc->sc_type == WM_T_82575)
   11184 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11185 	else
   11186 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11187 
   11188 	return 1;
   11189 }
   11190 
   11191 /*
   11192  * Media related.
   11193  * GMII, SGMII, TBI (and SERDES)
   11194  */
   11195 
   11196 /* Common */
   11197 
   11198 /*
   11199  * wm_tbi_serdes_set_linkled:
   11200  *
   11201  *	Update the link LED on TBI and SERDES devices.
   11202  */
   11203 static void
   11204 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11205 {
   11206 
   11207 	if (sc->sc_tbi_linkup)
   11208 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11209 	else
   11210 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11211 
   11212 	/* 82540 or newer devices are active low */
   11213 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11214 
   11215 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11216 }
   11217 
   11218 /* GMII related */
   11219 
   11220 /*
   11221  * wm_gmii_reset:
   11222  *
   11223  *	Reset the PHY.
   11224  */
   11225 static void
   11226 wm_gmii_reset(struct wm_softc *sc)
   11227 {
   11228 	uint32_t reg;
   11229 	int rv;
   11230 
   11231 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11232 		device_xname(sc->sc_dev), __func__));
   11233 
   11234 	rv = sc->phy.acquire(sc);
   11235 	if (rv != 0) {
   11236 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11237 		    __func__);
   11238 		return;
   11239 	}
   11240 
   11241 	switch (sc->sc_type) {
   11242 	case WM_T_82542_2_0:
   11243 	case WM_T_82542_2_1:
   11244 		/* null */
   11245 		break;
   11246 	case WM_T_82543:
   11247 		/*
    11248 		 * With 82543, we must force the MAC's speed and duplex to
    11249 		 * match the PHY's speed and duplex configuration.
   11250 		 * In addition, we need to perform a hardware reset on the PHY
   11251 		 * to take it out of reset.
   11252 		 */
   11253 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11254 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11255 
   11256 		/* The PHY reset pin is active-low. */
   11257 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11258 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11259 		    CTRL_EXT_SWDPIN(4));
   11260 		reg |= CTRL_EXT_SWDPIO(4);
   11261 
   11262 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11263 		CSR_WRITE_FLUSH(sc);
   11264 		delay(10*1000);
   11265 
   11266 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11267 		CSR_WRITE_FLUSH(sc);
   11268 		delay(150);
   11269 #if 0
   11270 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11271 #endif
   11272 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11273 		break;
   11274 	case WM_T_82544:	/* Reset 10000us */
   11275 	case WM_T_82540:
   11276 	case WM_T_82545:
   11277 	case WM_T_82545_3:
   11278 	case WM_T_82546:
   11279 	case WM_T_82546_3:
   11280 	case WM_T_82541:
   11281 	case WM_T_82541_2:
   11282 	case WM_T_82547:
   11283 	case WM_T_82547_2:
   11284 	case WM_T_82571:	/* Reset 100us */
   11285 	case WM_T_82572:
   11286 	case WM_T_82573:
   11287 	case WM_T_82574:
   11288 	case WM_T_82575:
   11289 	case WM_T_82576:
   11290 	case WM_T_82580:
   11291 	case WM_T_I350:
   11292 	case WM_T_I354:
   11293 	case WM_T_I210:
   11294 	case WM_T_I211:
   11295 	case WM_T_82583:
   11296 	case WM_T_80003:
   11297 		/* Generic reset */
   11298 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11299 		CSR_WRITE_FLUSH(sc);
   11300 		delay(20000);
   11301 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11302 		CSR_WRITE_FLUSH(sc);
   11303 		delay(20000);
   11304 
   11305 		if ((sc->sc_type == WM_T_82541)
   11306 		    || (sc->sc_type == WM_T_82541_2)
   11307 		    || (sc->sc_type == WM_T_82547)
   11308 		    || (sc->sc_type == WM_T_82547_2)) {
    11309 			/* Workarounds for IGP are done in igp_reset() */
   11310 			/* XXX add code to set LED after phy reset */
   11311 		}
   11312 		break;
   11313 	case WM_T_ICH8:
   11314 	case WM_T_ICH9:
   11315 	case WM_T_ICH10:
   11316 	case WM_T_PCH:
   11317 	case WM_T_PCH2:
   11318 	case WM_T_PCH_LPT:
   11319 	case WM_T_PCH_SPT:
   11320 	case WM_T_PCH_CNP:
   11321 	case WM_T_PCH_TGP:
   11322 		/* Generic reset */
   11323 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11324 		CSR_WRITE_FLUSH(sc);
   11325 		delay(100);
   11326 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11327 		CSR_WRITE_FLUSH(sc);
   11328 		delay(150);
   11329 		break;
   11330 	default:
   11331 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11332 		    __func__);
   11333 		break;
   11334 	}
   11335 
   11336 	sc->phy.release(sc);
   11337 
   11338 	/* get_cfg_done */
   11339 	wm_get_cfg_done(sc);
   11340 
   11341 	/* Extra setup */
   11342 	switch (sc->sc_type) {
   11343 	case WM_T_82542_2_0:
   11344 	case WM_T_82542_2_1:
   11345 	case WM_T_82543:
   11346 	case WM_T_82544:
   11347 	case WM_T_82540:
   11348 	case WM_T_82545:
   11349 	case WM_T_82545_3:
   11350 	case WM_T_82546:
   11351 	case WM_T_82546_3:
   11352 	case WM_T_82541_2:
   11353 	case WM_T_82547_2:
   11354 	case WM_T_82571:
   11355 	case WM_T_82572:
   11356 	case WM_T_82573:
   11357 	case WM_T_82574:
   11358 	case WM_T_82583:
   11359 	case WM_T_82575:
   11360 	case WM_T_82576:
   11361 	case WM_T_82580:
   11362 	case WM_T_I350:
   11363 	case WM_T_I354:
   11364 	case WM_T_I210:
   11365 	case WM_T_I211:
   11366 	case WM_T_80003:
   11367 		/* Null */
   11368 		break;
   11369 	case WM_T_82541:
   11370 	case WM_T_82547:
    11371 		/* XXX Configure activity LED after PHY reset */
   11372 		break;
   11373 	case WM_T_ICH8:
   11374 	case WM_T_ICH9:
   11375 	case WM_T_ICH10:
   11376 	case WM_T_PCH:
   11377 	case WM_T_PCH2:
   11378 	case WM_T_PCH_LPT:
   11379 	case WM_T_PCH_SPT:
   11380 	case WM_T_PCH_CNP:
   11381 	case WM_T_PCH_TGP:
   11382 		wm_phy_post_reset(sc);
   11383 		break;
   11384 	default:
   11385 		panic("%s: unknown type\n", __func__);
   11386 		break;
   11387 	}
   11388 }
   11389 
   11390 /*
   11391  * Set up sc_phytype and mii_{read|write}reg.
   11392  *
    11393  *  To identify the PHY type, the correct read/write functions must be
    11394  * selected, and to select them, the PCI ID or MAC type is needed before
    11395  * any PHY register can be accessed.
    11396  *
    11397  *  On the first call of this function, the PHY ID is not known yet, so
    11398  * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
    11399  * so the result might be incorrect.
    11400  *
    11401  *  On the second call, the PHY OUI and model are used to identify the
    11402  * PHY type. This might still be imperfect because some entries are not
    11403  * in the comparison list, but it is better than the first call.
    11404  *
    11405  *  If the newly detected result differs from the previous assumption,
    11406  * a diagnostic message is printed.
   11407  */
   11408 static void
   11409 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11410     uint16_t phy_model)
   11411 {
   11412 	device_t dev = sc->sc_dev;
   11413 	struct mii_data *mii = &sc->sc_mii;
   11414 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11415 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11416 	mii_readreg_t new_readreg;
   11417 	mii_writereg_t new_writereg;
   11418 	bool dodiag = true;
   11419 
   11420 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11421 		device_xname(sc->sc_dev), __func__));
   11422 
   11423 	/*
    11424 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
    11425 	 * incorrect, so don't print diagnostic output on the second call.
   11426 	 */
   11427 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11428 		dodiag = false;
   11429 
   11430 	if (mii->mii_readreg == NULL) {
   11431 		/*
   11432 		 *  This is the first call of this function. For ICH and PCH
   11433 		 * variants, it's difficult to determine the PHY access method
   11434 		 * by sc_type, so use the PCI product ID for some devices.
   11435 		 */
   11436 
   11437 		switch (sc->sc_pcidevid) {
   11438 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11439 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11440 			/* 82577 */
   11441 			new_phytype = WMPHY_82577;
   11442 			break;
   11443 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11444 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11445 			/* 82578 */
   11446 			new_phytype = WMPHY_82578;
   11447 			break;
   11448 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11449 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11450 			/* 82579 */
   11451 			new_phytype = WMPHY_82579;
   11452 			break;
   11453 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11454 		case PCI_PRODUCT_INTEL_82801I_BM:
   11455 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11456 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11457 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11458 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11459 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11460 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11461 			/* ICH8, 9, 10 with 82567 */
   11462 			new_phytype = WMPHY_BM;
   11463 			break;
   11464 		default:
   11465 			break;
   11466 		}
   11467 	} else {
   11468 		/* It's not the first call. Use PHY OUI and model */
   11469 		switch (phy_oui) {
   11470 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11471 			switch (phy_model) {
   11472 			case MII_MODEL_ATTANSIC_AR8021:
   11473 				new_phytype = WMPHY_82578;
   11474 				break;
   11475 			default:
   11476 				break;
   11477 			}
   11478 			break;
   11479 		case MII_OUI_xxMARVELL:
   11480 			switch (phy_model) {
   11481 			case MII_MODEL_xxMARVELL_I210:
   11482 				new_phytype = WMPHY_I210;
   11483 				break;
   11484 			case MII_MODEL_xxMARVELL_E1011:
   11485 			case MII_MODEL_xxMARVELL_E1000_3:
   11486 			case MII_MODEL_xxMARVELL_E1000_5:
   11487 			case MII_MODEL_xxMARVELL_E1112:
   11488 				new_phytype = WMPHY_M88;
   11489 				break;
   11490 			case MII_MODEL_xxMARVELL_E1149:
   11491 				new_phytype = WMPHY_BM;
   11492 				break;
   11493 			case MII_MODEL_xxMARVELL_E1111:
   11494 			case MII_MODEL_xxMARVELL_I347:
   11495 			case MII_MODEL_xxMARVELL_E1512:
   11496 			case MII_MODEL_xxMARVELL_E1340M:
   11497 			case MII_MODEL_xxMARVELL_E1543:
   11498 				new_phytype = WMPHY_M88;
   11499 				break;
   11500 			case MII_MODEL_xxMARVELL_I82563:
   11501 				new_phytype = WMPHY_GG82563;
   11502 				break;
   11503 			default:
   11504 				break;
   11505 			}
   11506 			break;
   11507 		case MII_OUI_INTEL:
   11508 			switch (phy_model) {
   11509 			case MII_MODEL_INTEL_I82577:
   11510 				new_phytype = WMPHY_82577;
   11511 				break;
   11512 			case MII_MODEL_INTEL_I82579:
   11513 				new_phytype = WMPHY_82579;
   11514 				break;
   11515 			case MII_MODEL_INTEL_I217:
   11516 				new_phytype = WMPHY_I217;
   11517 				break;
   11518 			case MII_MODEL_INTEL_I82580:
   11519 				new_phytype = WMPHY_82580;
   11520 				break;
   11521 			case MII_MODEL_INTEL_I350:
   11522 				new_phytype = WMPHY_I350;
   11523 				break;
   11524 			default:
   11525 				break;
   11526 			}
   11527 			break;
   11528 		case MII_OUI_yyINTEL:
   11529 			switch (phy_model) {
   11530 			case MII_MODEL_yyINTEL_I82562G:
   11531 			case MII_MODEL_yyINTEL_I82562EM:
   11532 			case MII_MODEL_yyINTEL_I82562ET:
   11533 				new_phytype = WMPHY_IFE;
   11534 				break;
   11535 			case MII_MODEL_yyINTEL_IGP01E1000:
   11536 				new_phytype = WMPHY_IGP;
   11537 				break;
   11538 			case MII_MODEL_yyINTEL_I82566:
   11539 				new_phytype = WMPHY_IGP_3;
   11540 				break;
   11541 			default:
   11542 				break;
   11543 			}
   11544 			break;
   11545 		default:
   11546 			break;
   11547 		}
   11548 
   11549 		if (dodiag) {
   11550 			if (new_phytype == WMPHY_UNKNOWN)
   11551 				aprint_verbose_dev(dev,
   11552 				    "%s: Unknown PHY model. OUI=%06x, "
   11553 				    "model=%04x\n", __func__, phy_oui,
   11554 				    phy_model);
   11555 
   11556 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11557 			    && (sc->sc_phytype != new_phytype)) {
    11558 				aprint_error_dev(dev, "Previously assumed PHY "
    11559 				    "type(%u) was incorrect. PHY type from "
    11560 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   11561 			}
   11562 		}
   11563 	}
   11564 
   11565 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11566 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11567 		/* SGMII */
   11568 		new_readreg = wm_sgmii_readreg;
   11569 		new_writereg = wm_sgmii_writereg;
   11570 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11571 		/* BM2 (phyaddr == 1) */
   11572 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11573 		    && (new_phytype != WMPHY_BM)
   11574 		    && (new_phytype != WMPHY_UNKNOWN))
   11575 			doubt_phytype = new_phytype;
   11576 		new_phytype = WMPHY_BM;
   11577 		new_readreg = wm_gmii_bm_readreg;
   11578 		new_writereg = wm_gmii_bm_writereg;
   11579 	} else if (sc->sc_type >= WM_T_PCH) {
   11580 		/* All PCH* use _hv_ */
   11581 		new_readreg = wm_gmii_hv_readreg;
   11582 		new_writereg = wm_gmii_hv_writereg;
   11583 	} else if (sc->sc_type >= WM_T_ICH8) {
   11584 		/* non-82567 ICH8, 9 and 10 */
   11585 		new_readreg = wm_gmii_i82544_readreg;
   11586 		new_writereg = wm_gmii_i82544_writereg;
   11587 	} else if (sc->sc_type >= WM_T_80003) {
   11588 		/* 80003 */
   11589 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11590 		    && (new_phytype != WMPHY_GG82563)
   11591 		    && (new_phytype != WMPHY_UNKNOWN))
   11592 			doubt_phytype = new_phytype;
   11593 		new_phytype = WMPHY_GG82563;
   11594 		new_readreg = wm_gmii_i80003_readreg;
   11595 		new_writereg = wm_gmii_i80003_writereg;
   11596 	} else if (sc->sc_type >= WM_T_I210) {
   11597 		/* I210 and I211 */
   11598 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11599 		    && (new_phytype != WMPHY_I210)
   11600 		    && (new_phytype != WMPHY_UNKNOWN))
   11601 			doubt_phytype = new_phytype;
   11602 		new_phytype = WMPHY_I210;
   11603 		new_readreg = wm_gmii_gs40g_readreg;
   11604 		new_writereg = wm_gmii_gs40g_writereg;
   11605 	} else if (sc->sc_type >= WM_T_82580) {
   11606 		/* 82580, I350 and I354 */
   11607 		new_readreg = wm_gmii_82580_readreg;
   11608 		new_writereg = wm_gmii_82580_writereg;
   11609 	} else if (sc->sc_type >= WM_T_82544) {
    11610 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11611 		new_readreg = wm_gmii_i82544_readreg;
   11612 		new_writereg = wm_gmii_i82544_writereg;
   11613 	} else {
   11614 		new_readreg = wm_gmii_i82543_readreg;
   11615 		new_writereg = wm_gmii_i82543_writereg;
   11616 	}
   11617 
   11618 	if (new_phytype == WMPHY_BM) {
   11619 		/* All BM use _bm_ */
   11620 		new_readreg = wm_gmii_bm_readreg;
   11621 		new_writereg = wm_gmii_bm_writereg;
   11622 	}
   11623 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
   11624 		/* All PCH* use _hv_ */
   11625 		new_readreg = wm_gmii_hv_readreg;
   11626 		new_writereg = wm_gmii_hv_writereg;
   11627 	}
   11628 
   11629 	/* Diag output */
   11630 	if (dodiag) {
   11631 		if (doubt_phytype != WMPHY_UNKNOWN)
   11632 			aprint_error_dev(dev, "Assumed new PHY type was "
   11633 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11634 			    new_phytype);
   11635 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11636 		    && (sc->sc_phytype != new_phytype))
    11637 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    11638 			    " was incorrect. New PHY type = %u\n",
   11639 			    sc->sc_phytype, new_phytype);
   11640 
   11641 		if ((mii->mii_readreg != NULL) &&
   11642 		    (new_phytype == WMPHY_UNKNOWN))
   11643 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11644 
   11645 		if ((mii->mii_readreg != NULL) &&
   11646 		    (mii->mii_readreg != new_readreg))
   11647 			aprint_error_dev(dev, "Previously assumed PHY "
   11648 			    "read/write function was incorrect.\n");
   11649 	}
   11650 
   11651 	/* Update now */
   11652 	sc->sc_phytype = new_phytype;
   11653 	mii->mii_readreg = new_readreg;
   11654 	mii->mii_writereg = new_writereg;
   11655 	if (new_readreg == wm_gmii_hv_readreg) {
   11656 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11657 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11658 	} else if (new_readreg == wm_sgmii_readreg) {
   11659 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11660 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11661 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11662 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11663 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11664 	}
   11665 }
   11666 
   11667 /*
   11668  * wm_get_phy_id_82575:
   11669  *
   11670  * Return PHY ID. Return -1 if it failed.
   11671  */
   11672 static int
   11673 wm_get_phy_id_82575(struct wm_softc *sc)
   11674 {
   11675 	uint32_t reg;
   11676 	int phyid = -1;
   11677 
   11678 	/* XXX */
   11679 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11680 		return -1;
   11681 
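          	/*
          	 * When SGMII goes through MDIO, the hardware holds the PHY
          	 * address in MDIC on the 82575/82576 and in MDICNFG on the
          	 * 82580 and later.
          	 */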
   11682 	if (wm_sgmii_uses_mdio(sc)) {
   11683 		switch (sc->sc_type) {
   11684 		case WM_T_82575:
   11685 		case WM_T_82576:
   11686 			reg = CSR_READ(sc, WMREG_MDIC);
   11687 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11688 			break;
   11689 		case WM_T_82580:
   11690 		case WM_T_I350:
   11691 		case WM_T_I354:
   11692 		case WM_T_I210:
   11693 		case WM_T_I211:
   11694 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11695 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11696 			break;
   11697 		default:
   11698 			return -1;
   11699 		}
   11700 	}
   11701 
   11702 	return phyid;
   11703 }
   11704 
   11705 /*
   11706  * wm_gmii_mediainit:
   11707  *
   11708  *	Initialize media for use on 1000BASE-T devices.
   11709  */
   11710 static void
   11711 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11712 {
   11713 	device_t dev = sc->sc_dev;
   11714 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11715 	struct mii_data *mii = &sc->sc_mii;
   11716 
   11717 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11718 		device_xname(sc->sc_dev), __func__));
   11719 
   11720 	/* We have GMII. */
   11721 	sc->sc_flags |= WM_F_HAS_MII;
   11722 
   11723 	if (sc->sc_type == WM_T_80003)
    11724 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11725 	else
   11726 		sc->sc_tipg = TIPG_1000T_DFLT;
   11727 
   11728 	/*
   11729 	 * Let the chip set speed/duplex on its own based on
   11730 	 * signals from the PHY.
   11731 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11732 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11733 	 */
   11734 	sc->sc_ctrl |= CTRL_SLU;
   11735 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11736 
   11737 	/* Initialize our media structures and probe the GMII. */
   11738 	mii->mii_ifp = ifp;
   11739 
   11740 	mii->mii_statchg = wm_gmii_statchg;
   11741 
   11742 	/* get PHY control from SMBus to PCIe */
   11743 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11744 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11745 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
   11746 		wm_init_phy_workarounds_pchlan(sc);
   11747 
   11748 	wm_gmii_reset(sc);
   11749 
   11750 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11751 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11752 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11753 
   11754 	/* Setup internal SGMII PHY for SFP */
   11755 	wm_sgmii_sfp_preconfig(sc);
   11756 
   11757 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11758 	    || (sc->sc_type == WM_T_82580)
   11759 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11760 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11761 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11762 			/* Attach only one port */
   11763 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11764 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11765 		} else {
   11766 			int i, id;
   11767 			uint32_t ctrl_ext;
   11768 
   11769 			id = wm_get_phy_id_82575(sc);
   11770 			if (id != -1) {
   11771 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11772 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11773 			}
   11774 			if ((id == -1)
   11775 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11776 				/* Power on sgmii phy if it is disabled */
   11777 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11778 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    11779 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   11780 				CSR_WRITE_FLUSH(sc);
   11781 				delay(300*1000); /* XXX too long */
   11782 
   11783 				/*
    11784 				 * Scan PHY addresses 1 through 7.
    11785 				 *
    11786 				 * I2C access fails with the I2C register's
    11787 				 * ERROR bit set, so suppress error messages
    11788 				 * while scanning.
   11789 				 */
   11790 				sc->phy.no_errprint = true;
   11791 				for (i = 1; i < 8; i++)
   11792 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11793 					    0xffffffff, i, MII_OFFSET_ANY,
   11794 					    MIIF_DOPAUSE);
   11795 				sc->phy.no_errprint = false;
   11796 
   11797 				/* Restore previous sfp cage power state */
   11798 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11799 			}
   11800 		}
   11801 	} else
   11802 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11803 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11804 
   11805 	/*
   11806 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11807 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11808 	 */
   11809 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   11810 		(sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   11811 		|| (sc->sc_type == WM_T_PCH_TGP))
   11812 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11813 		wm_set_mdio_slow_mode_hv(sc);
   11814 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11815 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11816 	}
   11817 
   11818 	/*
   11819 	 * (For ICH8 variants)
   11820 	 * If PHY detection failed, use BM's r/w function and retry.
   11821 	 */
   11822 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11823 		/* if failed, retry with *_bm_* */
   11824 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11825 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11826 		    sc->sc_phytype);
   11827 		sc->sc_phytype = WMPHY_BM;
   11828 		mii->mii_readreg = wm_gmii_bm_readreg;
   11829 		mii->mii_writereg = wm_gmii_bm_writereg;
   11830 
   11831 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11832 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11833 	}
   11834 
   11835 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11836 		/* No PHY was found */
   11837 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11838 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11839 		sc->sc_phytype = WMPHY_NONE;
   11840 	} else {
   11841 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11842 
   11843 		/*
   11844 		 * PHY found! Check PHY type again by the second call of
   11845 		 * wm_gmii_setup_phytype.
   11846 		 */
   11847 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11848 		    child->mii_mpd_model);
   11849 
   11850 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11851 	}
   11852 }
   11853 
   11854 /*
   11855  * wm_gmii_mediachange:	[ifmedia interface function]
   11856  *
   11857  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11858  */
   11859 static int
   11860 wm_gmii_mediachange(struct ifnet *ifp)
   11861 {
   11862 	struct wm_softc *sc = ifp->if_softc;
   11863 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11864 	uint32_t reg;
   11865 	int rc;
   11866 
   11867 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11868 		device_xname(sc->sc_dev), __func__));
   11869 
   11870 	KASSERT(mutex_owned(sc->sc_core_lock));
   11871 
   11872 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11873 		return 0;
   11874 
   11875 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
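          	/* Clear the "go link disconnect" bit so the PHY can link up. */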
   11876 	if ((sc->sc_type == WM_T_82580)
   11877 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11878 	    || (sc->sc_type == WM_T_I211)) {
   11879 		reg = CSR_READ(sc, WMREG_PHPM);
   11880 		reg &= ~PHPM_GO_LINK_D;
   11881 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11882 	}
   11883 
   11884 	/* Disable D0 LPLU. */
   11885 	wm_lplu_d0_disable(sc);
   11886 
   11887 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11888 	sc->sc_ctrl |= CTRL_SLU;
   11889 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11890 	    || (sc->sc_type > WM_T_82543)) {
   11891 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11892 	} else {
   11893 		sc->sc_ctrl &= ~CTRL_ASDE;
   11894 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11895 		if (ife->ifm_media & IFM_FDX)
   11896 			sc->sc_ctrl |= CTRL_FD;
   11897 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11898 		case IFM_10_T:
   11899 			sc->sc_ctrl |= CTRL_SPEED_10;
   11900 			break;
   11901 		case IFM_100_TX:
   11902 			sc->sc_ctrl |= CTRL_SPEED_100;
   11903 			break;
   11904 		case IFM_1000_T:
   11905 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11906 			break;
   11907 		case IFM_NONE:
   11908 			/* There is no specific setting for IFM_NONE */
   11909 			break;
   11910 		default:
   11911 			panic("wm_gmii_mediachange: bad media 0x%x",
   11912 			    ife->ifm_media);
   11913 		}
   11914 	}
   11915 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11916 	CSR_WRITE_FLUSH(sc);
   11917 
   11918 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11919 		wm_serdes_mediachange(ifp);
   11920 
   11921 	if (sc->sc_type <= WM_T_82543)
   11922 		wm_gmii_reset(sc);
   11923 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11924 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11925 		/* Allow time for the SFP cage to power up the PHY */
   11926 		delay(300 * 1000);
   11927 		wm_gmii_reset(sc);
   11928 	}
   11929 
   11930 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11931 		return 0;
   11932 	return rc;
   11933 }
   11934 
   11935 /*
   11936  * wm_gmii_mediastatus:	[ifmedia interface function]
   11937  *
   11938  *	Get the current interface media status on a 1000BASE-T device.
   11939  */
   11940 static void
   11941 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11942 {
   11943 	struct wm_softc *sc = ifp->if_softc;
   11944 	struct ethercom *ec = &sc->sc_ethercom;
   11945 	struct mii_data *mii;
   11946 	bool dopoll = true;
   11947 
   11948 	/*
   11949 	 * In normal drivers, ether_mediastatus() is called here.
    11950 	 * To avoid calling mii_pollstat(), ether_mediastatus() is open-coded.
   11951 	 */
   11952 	KASSERT(mutex_owned(sc->sc_core_lock));
   11953 	KASSERT(ec->ec_mii != NULL);
   11954 	KASSERT(mii_locked(ec->ec_mii));
   11955 
   11956 	mii = ec->ec_mii;
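          	/*
          	 * While the link-up delay window is open, skip mii_pollstat()
          	 * so a link that is still settling isn't reported up early.
          	 */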
   11957 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   11958 		struct timeval now;
   11959 
   11960 		getmicrotime(&now);
   11961 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   11962 			dopoll = false;
   11963 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   11964 			/* Simplify by checking tv_sec only. It's enough. */
   11965 
   11966 			sc->sc_linkup_delay_time.tv_sec = 0;
   11967 			sc->sc_linkup_delay_time.tv_usec = 0;
   11968 		}
   11969 	}
   11970 
   11971 	/*
   11972 	 * Don't call mii_pollstat() while doing workaround.
   11973 	 * See also wm_linkintr_gmii() and wm_tick().
   11974 	 */
   11975 	if (dopoll)
   11976 		mii_pollstat(mii);
   11977 	ifmr->ifm_active = mii->mii_media_active;
   11978 	ifmr->ifm_status = mii->mii_media_status;
   11979 
   11980 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11981 	    | sc->sc_flowflags;
   11982 }
   11983 
   11984 #define	MDI_IO		CTRL_SWDPIN(2)
   11985 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11986 #define	MDI_CLK		CTRL_SWDPIN(3)
   11987 
   11988 static void
   11989 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11990 {
   11991 	uint32_t i, v;
   11992 
   11993 	v = CSR_READ(sc, WMREG_CTRL);
   11994 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11995 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11996 
   11997 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11998 		if (data & i)
   11999 			v |= MDI_IO;
   12000 		else
   12001 			v &= ~MDI_IO;
   12002 		CSR_WRITE(sc, WMREG_CTRL, v);
   12003 		CSR_WRITE_FLUSH(sc);
   12004 		delay(10);
   12005 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12006 		CSR_WRITE_FLUSH(sc);
   12007 		delay(10);
   12008 		CSR_WRITE(sc, WMREG_CTRL, v);
   12009 		CSR_WRITE_FLUSH(sc);
   12010 		delay(10);
   12011 	}
   12012 }
   12013 
   12014 static uint16_t
   12015 wm_i82543_mii_recvbits(struct wm_softc *sc)
   12016 {
   12017 	uint32_t v, i;
   12018 	uint16_t data = 0;
   12019 
   12020 	v = CSR_READ(sc, WMREG_CTRL);
   12021 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   12022 	v |= CTRL_SWDPIO(3);
   12023 
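          	/* Clock one turnaround cycle with MDI_IO switched to input. */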
   12024 	CSR_WRITE(sc, WMREG_CTRL, v);
   12025 	CSR_WRITE_FLUSH(sc);
   12026 	delay(10);
   12027 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12028 	CSR_WRITE_FLUSH(sc);
   12029 	delay(10);
   12030 	CSR_WRITE(sc, WMREG_CTRL, v);
   12031 	CSR_WRITE_FLUSH(sc);
   12032 	delay(10);
   12033 
   12034 	for (i = 0; i < 16; i++) {
   12035 		data <<= 1;
   12036 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12037 		CSR_WRITE_FLUSH(sc);
   12038 		delay(10);
   12039 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   12040 			data |= 1;
   12041 		CSR_WRITE(sc, WMREG_CTRL, v);
   12042 		CSR_WRITE_FLUSH(sc);
   12043 		delay(10);
   12044 	}
   12045 
   12046 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12047 	CSR_WRITE_FLUSH(sc);
   12048 	delay(10);
   12049 	CSR_WRITE(sc, WMREG_CTRL, v);
   12050 	CSR_WRITE_FLUSH(sc);
   12051 	delay(10);
   12052 
   12053 	return data;
   12054 }
   12055 
   12056 #undef MDI_IO
   12057 #undef MDI_DIR
   12058 #undef MDI_CLK
   12059 
   12060 /*
   12061  * wm_gmii_i82543_readreg:	[mii interface function]
   12062  *
   12063  *	Read a PHY register on the GMII (i82543 version).
   12064  */
   12065 static int
   12066 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12067 {
   12068 	struct wm_softc *sc = device_private(dev);
   12069 
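          	/*
          	 * Clause 22 MDIO read frame: a 32-bit preamble of ones, then
          	 * the 14 command bits (ST, OP, PHYAD, REGAD); the PHY then
          	 * drives the turnaround and the 16 data bits.
          	 */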
   12070 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12071 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   12072 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   12073 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   12074 
   12075 	DPRINTF(sc, WM_DEBUG_GMII,
   12076 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   12077 		device_xname(dev), phy, reg, *val));
   12078 
   12079 	return 0;
   12080 }
   12081 
   12082 /*
   12083  * wm_gmii_i82543_writereg:	[mii interface function]
   12084  *
   12085  *	Write a PHY register on the GMII (i82543 version).
   12086  */
   12087 static int
   12088 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   12089 {
   12090 	struct wm_softc *sc = device_private(dev);
   12091 
   12092 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12093 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   12094 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   12095 	    (MII_COMMAND_START << 30), 32);
   12096 
   12097 	return 0;
   12098 }
   12099 
   12100 /*
   12101  * wm_gmii_mdic_readreg:	[mii interface function]
   12102  *
   12103  *	Read a PHY register on the GMII.
   12104  */
   12105 static int
   12106 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12107 {
   12108 	struct wm_softc *sc = device_private(dev);
   12109 	uint32_t mdic = 0;
   12110 	int i;
   12111 
   12112 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12113 	    && (reg > MII_ADDRMASK)) {
   12114 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12115 		    __func__, sc->sc_phytype, reg);
   12116 		reg &= MII_ADDRMASK;
   12117 	}
   12118 
   12119 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   12120 	    MDIC_REGADD(reg));
   12121 
   12122 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12123 		delay(50);
   12124 		mdic = CSR_READ(sc, WMREG_MDIC);
   12125 		if (mdic & MDIC_READY)
   12126 			break;
   12127 	}
   12128 
   12129 	if ((mdic & MDIC_READY) == 0) {
   12130 		DPRINTF(sc, WM_DEBUG_GMII,
   12131 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   12132 			device_xname(dev), phy, reg));
   12133 		return ETIMEDOUT;
   12134 	} else if (mdic & MDIC_E) {
   12135 		/* This is normal if no PHY is present. */
   12136 		DPRINTF(sc, WM_DEBUG_GMII,
   12137 		    ("%s: MDIC read error: phy %d reg %d\n",
   12138 			device_xname(sc->sc_dev), phy, reg));
   12139 		return -1;
   12140 	} else
   12141 		*val = MDIC_DATA(mdic);
   12142 
   12143 	/*
   12144 	 * Allow some time after each MDIC transaction to avoid
   12145 	 * reading duplicate data in the next MDIC transaction.
   12146 	 */
   12147 	if (sc->sc_type == WM_T_PCH2)
   12148 		delay(100);
   12149 
   12150 	return 0;
   12151 }
   12152 
   12153 /*
   12154  * wm_gmii_mdic_writereg:	[mii interface function]
   12155  *
   12156  *	Write a PHY register on the GMII.
   12157  */
   12158 static int
   12159 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12160 {
   12161 	struct wm_softc *sc = device_private(dev);
   12162 	uint32_t mdic = 0;
   12163 	int i;
   12164 
   12165 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12166 	    && (reg > MII_ADDRMASK)) {
   12167 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12168 		    __func__, sc->sc_phytype, reg);
   12169 		reg &= MII_ADDRMASK;
   12170 	}
   12171 
   12172 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12173 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12174 
   12175 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12176 		delay(50);
   12177 		mdic = CSR_READ(sc, WMREG_MDIC);
   12178 		if (mdic & MDIC_READY)
   12179 			break;
   12180 	}
   12181 
   12182 	if ((mdic & MDIC_READY) == 0) {
   12183 		DPRINTF(sc, WM_DEBUG_GMII,
   12184 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12185 			device_xname(dev), phy, reg));
   12186 		return ETIMEDOUT;
   12187 	} else if (mdic & MDIC_E) {
   12188 		DPRINTF(sc, WM_DEBUG_GMII,
   12189 		    ("%s: MDIC write error: phy %d reg %d\n",
   12190 			device_xname(dev), phy, reg));
   12191 		return -1;
   12192 	}
   12193 
   12194 	/*
   12195 	 * Allow some time after each MDIC transaction to avoid
   12196 	 * reading duplicate data in the next MDIC transaction.
   12197 	 */
   12198 	if (sc->sc_type == WM_T_PCH2)
   12199 		delay(100);
   12200 
   12201 	return 0;
   12202 }
   12203 
   12204 /*
   12205  * wm_gmii_i82544_readreg:	[mii interface function]
   12206  *
   12207  *	Read a PHY register on the GMII.
   12208  */
   12209 static int
   12210 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12211 {
   12212 	struct wm_softc *sc = device_private(dev);
   12213 	int rv;
   12214 
   12215 	rv = sc->phy.acquire(sc);
   12216 	if (rv != 0) {
   12217 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12218 		return rv;
   12219 	}
   12220 
   12221 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12222 
   12223 	sc->phy.release(sc);
   12224 
   12225 	return rv;
   12226 }
   12227 
   12228 static int
   12229 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12230 {
   12231 	struct wm_softc *sc = device_private(dev);
   12232 	int rv;
   12233 
   12234 	switch (sc->sc_phytype) {
   12235 	case WMPHY_IGP:
   12236 	case WMPHY_IGP_2:
   12237 	case WMPHY_IGP_3:
   12238 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12239 			rv = wm_gmii_mdic_writereg(dev, phy,
   12240 			    IGPHY_PAGE_SELECT, reg);
   12241 			if (rv != 0)
   12242 				return rv;
   12243 		}
   12244 		break;
   12245 	default:
   12246 #ifdef WM_DEBUG
   12247 		if ((reg >> MII_ADDRBITS) != 0)
   12248 			device_printf(dev,
   12249 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12250 			    __func__, sc->sc_phytype, reg);
   12251 #endif
   12252 		break;
   12253 	}
   12254 
   12255 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12256 }
   12257 
   12258 /*
   12259  * wm_gmii_i82544_writereg:	[mii interface function]
   12260  *
   12261  *	Write a PHY register on the GMII.
   12262  */
   12263 static int
   12264 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12265 {
   12266 	struct wm_softc *sc = device_private(dev);
   12267 	int rv;
   12268 
   12269 	rv = sc->phy.acquire(sc);
   12270 	if (rv != 0) {
   12271 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12272 		return rv;
   12273 	}
   12274 
   12275 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12276 	sc->phy.release(sc);
   12277 
   12278 	return rv;
   12279 }
   12280 
   12281 static int
   12282 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12283 {
   12284 	struct wm_softc *sc = device_private(dev);
   12285 	int rv;
   12286 
   12287 	switch (sc->sc_phytype) {
   12288 	case WMPHY_IGP:
   12289 	case WMPHY_IGP_2:
   12290 	case WMPHY_IGP_3:
   12291 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12292 			rv = wm_gmii_mdic_writereg(dev, phy,
   12293 			    IGPHY_PAGE_SELECT, reg);
   12294 			if (rv != 0)
   12295 				return rv;
   12296 		}
   12297 		break;
   12298 	default:
   12299 #ifdef WM_DEBUG
   12300 		if ((reg >> MII_ADDRBITS) != 0)
   12301 			device_printf(dev,
   12302 			    "%s: PHYTYPE == 0x%x, addr = 0x%02x",
   12303 			    __func__, sc->sc_phytype, reg);
   12304 #endif
   12305 		break;
   12306 	}
   12307 
   12308 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12309 }
   12310 
   12311 /*
   12312  * wm_gmii_i80003_readreg:	[mii interface function]
   12313  *
    12314  *	Read a PHY register on the Kumeran bus.
   12315  * This could be handled by the PHY layer if we didn't have to lock the
   12316  * resource ...
   12317  */
   12318 static int
   12319 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12320 {
   12321 	struct wm_softc *sc = device_private(dev);
   12322 	int page_select;
   12323 	uint16_t temp, temp2;
   12324 	int rv;
   12325 
   12326 	if (phy != 1) /* Only one PHY on kumeran bus */
   12327 		return -1;
   12328 
   12329 	rv = sc->phy.acquire(sc);
   12330 	if (rv != 0) {
   12331 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12332 		return rv;
   12333 	}
   12334 
   12335 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12336 		page_select = GG82563_PHY_PAGE_SELECT;
   12337 	else {
   12338 		/*
   12339 		 * Use Alternative Page Select register to access registers
   12340 		 * 30 and 31.
   12341 		 */
   12342 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12343 	}
   12344 	temp = reg >> GG82563_PAGE_SHIFT;
   12345 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12346 		goto out;
   12347 
   12348 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12349 		/*
    12350 		 * Wait another 200us to work around a bug in the ready bit
    12351 		 * of the MDIC register.
   12352 		 */
   12353 		delay(200);
   12354 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12355 		if ((rv != 0) || (temp2 != temp)) {
   12356 			device_printf(dev, "%s failed\n", __func__);
   12357 			rv = -1;
   12358 			goto out;
   12359 		}
   12360 		delay(200);
   12361 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12362 		delay(200);
   12363 	} else
   12364 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12365 
   12366 out:
   12367 	sc->phy.release(sc);
   12368 	return rv;
   12369 }
   12370 
   12371 /*
   12372  * wm_gmii_i80003_writereg:	[mii interface function]
   12373  *
    12374  *	Write a PHY register on the Kumeran bus.
   12375  * This could be handled by the PHY layer if we didn't have to lock the
   12376  * resource ...
   12377  */
   12378 static int
   12379 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12380 {
   12381 	struct wm_softc *sc = device_private(dev);
   12382 	int page_select, rv;
   12383 	uint16_t temp, temp2;
   12384 
   12385 	if (phy != 1) /* Only one PHY on kumeran bus */
   12386 		return -1;
   12387 
   12388 	rv = sc->phy.acquire(sc);
   12389 	if (rv != 0) {
   12390 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12391 		return rv;
   12392 	}
   12393 
   12394 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12395 		page_select = GG82563_PHY_PAGE_SELECT;
   12396 	else {
   12397 		/*
   12398 		 * Use Alternative Page Select register to access registers
   12399 		 * 30 and 31.
   12400 		 */
   12401 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12402 	}
   12403 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12404 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12405 		goto out;
   12406 
   12407 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    12408 		 * Wait another 200us to work around a bug in the ready bit
    12409 		 * of the MDIC register.
   12410 		 * register.
   12411 		 */
   12412 		delay(200);
   12413 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12414 		if ((rv != 0) || (temp2 != temp)) {
   12415 			device_printf(dev, "%s failed\n", __func__);
   12416 			rv = -1;
   12417 			goto out;
   12418 		}
   12419 		delay(200);
   12420 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12421 		delay(200);
   12422 	} else
   12423 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12424 
   12425 out:
   12426 	sc->phy.release(sc);
   12427 	return rv;
   12428 }
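/*
 * Example (illustrative sketch, not compiled): GG82563 register
 * arguments encode the page number in the bits above GG82563_PAGE_SHIFT,
 * which the two helpers above split back out before selecting the page.
 * A hypothetical read of page 2, register 16 would look like:
 */
#if 0
	uint16_t data;
	int rv;

	/* PHY address is always 1 on the kumeran bus. */
	rv = wm_gmii_i80003_readreg(dev, 1,
	    (2 << GG82563_PAGE_SHIFT) | 16, &data);
#endif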
   12429 
   12430 /*
   12431  * wm_gmii_bm_readreg:	[mii interface function]
   12432  *
    12433  *	Read a PHY register on the BM PHY.
   12434  * This could be handled by the PHY layer if we didn't have to lock the
   12435  * resource ...
   12436  */
   12437 static int
   12438 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12439 {
   12440 	struct wm_softc *sc = device_private(dev);
   12441 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12442 	int rv;
   12443 
   12444 	rv = sc->phy.acquire(sc);
   12445 	if (rv != 0) {
   12446 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12447 		return rv;
   12448 	}
   12449 
   12450 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12451 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12452 		    || (reg == 31)) ? 1 : phy;
   12453 	/* Page 800 works differently than the rest so it has its own func */
   12454 	if (page == BM_WUC_PAGE) {
   12455 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12456 		goto release;
   12457 	}
   12458 
   12459 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12460 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12461 		    && (sc->sc_type != WM_T_82583))
   12462 			rv = wm_gmii_mdic_writereg(dev, phy,
   12463 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12464 		else
   12465 			rv = wm_gmii_mdic_writereg(dev, phy,
   12466 			    BME1000_PHY_PAGE_SELECT, page);
   12467 		if (rv != 0)
   12468 			goto release;
   12469 	}
   12470 
   12471 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12472 
   12473 release:
   12474 	sc->phy.release(sc);
   12475 	return rv;
   12476 }
   12477 
   12478 /*
   12479  * wm_gmii_bm_writereg:	[mii interface function]
   12480  *
    12481  *	Write a PHY register on the BM PHY.
   12482  * This could be handled by the PHY layer if we didn't have to lock the
   12483  * resource ...
   12484  */
   12485 static int
   12486 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12487 {
   12488 	struct wm_softc *sc = device_private(dev);
   12489 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12490 	int rv;
   12491 
   12492 	rv = sc->phy.acquire(sc);
   12493 	if (rv != 0) {
   12494 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12495 		return rv;
   12496 	}
   12497 
   12498 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12499 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12500 		    || (reg == 31)) ? 1 : phy;
   12501 	/* Page 800 works differently than the rest so it has its own func */
   12502 	if (page == BM_WUC_PAGE) {
   12503 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12504 		goto release;
   12505 	}
   12506 
   12507 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12508 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12509 		    && (sc->sc_type != WM_T_82583))
   12510 			rv = wm_gmii_mdic_writereg(dev, phy,
   12511 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12512 		else
   12513 			rv = wm_gmii_mdic_writereg(dev, phy,
   12514 			    BME1000_PHY_PAGE_SELECT, page);
   12515 		if (rv != 0)
   12516 			goto release;
   12517 	}
   12518 
   12519 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12520 
   12521 release:
   12522 	sc->phy.release(sc);
   12523 	return rv;
   12524 }
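/*
 * Illustration (not compiled): BM register arguments carry the page
 * number in the bits above BME1000_PAGE_SHIFT and the register number
 * in the low five bits, which is how the two helpers above split them:
 */
#if 0
	int reg = (0 << BME1000_PAGE_SHIFT) | 25;	/* page 0, register 25 */
	uint16_t page = reg >> BME1000_PAGE_SHIFT;	/* == 0 */
	int regnum = reg & MII_ADDRMASK;		/* == 25 */
#endif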
   12525 
   12526 /*
   12527  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
    12528  *  @dev: pointer to the device
    12529  *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   12530  *
    12531  *  Assumes semaphore already acquired and phy_regp points to a valid memory
   12532  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12533  */
   12534 static int
   12535 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12536 {
   12537 #ifdef WM_DEBUG
   12538 	struct wm_softc *sc = device_private(dev);
   12539 #endif
   12540 	uint16_t temp;
   12541 	int rv;
   12542 
   12543 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12544 		device_xname(dev), __func__));
   12545 
   12546 	if (!phy_regp)
   12547 		return -1;
   12548 
   12549 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12550 
   12551 	/* Select Port Control Registers page */
   12552 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12553 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12554 	if (rv != 0)
   12555 		return rv;
   12556 
   12557 	/* Read WUCE and save it */
   12558 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12559 	if (rv != 0)
   12560 		return rv;
   12561 
   12562 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12563 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12564 	 */
   12565 	temp = *phy_regp;
   12566 	temp |= BM_WUC_ENABLE_BIT;
   12567 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12568 
   12569 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12570 		return rv;
   12571 
   12572 	/* Select Host Wakeup Registers page - caller now able to write
   12573 	 * registers on the Wakeup registers page
   12574 	 */
   12575 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12576 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12577 }
   12578 
   12579 /*
   12580  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
    12581  *  @dev: pointer to the device
    12582  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   12583  *
   12584  *  Restore BM_WUC_ENABLE_REG to its original value.
   12585  *
    12586  *  Assumes semaphore already acquired and *phy_regp is the contents of the
   12587  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12588  *  caller.
   12589  */
   12590 static int
   12591 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12592 {
   12593 #ifdef WM_DEBUG
   12594 	struct wm_softc *sc = device_private(dev);
   12595 #endif
   12596 
   12597 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12598 		device_xname(dev), __func__));
   12599 
   12600 	if (!phy_regp)
   12601 		return -1;
   12602 
   12603 	/* Select Port Control Registers page */
   12604 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12605 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12606 
   12607 	/* Restore 769.17 to its original value */
   12608 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12609 
   12610 	return 0;
   12611 }
   12612 
   12613 /*
   12614  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12615  *  @dev: pointer to the device
   12616  *  @offset: register offset to be read or written
   12617  *  @val: pointer to the data to read or write
   12618  *  @rd: determines if operation is read or write
   12619  *  @page_set: BM_WUC_PAGE already set and access enabled
   12620  *
   12621  *  Read the PHY register at offset and store the retrieved information in
   12622  *  data, or write data to PHY register at offset.  Note the procedure to
   12623  *  access the PHY wakeup registers is different than reading the other PHY
   12624  *  registers. It works as such:
   12625  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12626  *  2) Set page to 800 for host (801 if accessed by manageability firmware)
   12627  *  3) Write the address using the address opcode (0x11)
   12628  *  4) Read or write the data using the data opcode (0x12)
   12629  *  5) Restore 769.17.2 to its original value
   12630  *
   12631  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12632  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12633  *
   12634  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12635  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    12636  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12637  */
   12638 static int
    12639 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12640     bool page_set)
   12641 {
   12642 	struct wm_softc *sc = device_private(dev);
   12643 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12644 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12645 	uint16_t wuce;
   12646 	int rv = 0;
   12647 
   12648 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12649 		device_xname(dev), __func__));
   12650 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12651 	if ((sc->sc_type == WM_T_PCH)
   12652 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12653 		device_printf(dev,
   12654 		    "Attempting to access page %d while gig enabled.\n", page);
   12655 	}
   12656 
   12657 	if (!page_set) {
   12658 		/* Enable access to PHY wakeup registers */
   12659 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12660 		if (rv != 0) {
   12661 			device_printf(dev,
   12662 			    "%s: Could not enable PHY wakeup reg access\n",
   12663 			    __func__);
   12664 			return rv;
   12665 		}
   12666 	}
   12667 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12668 		device_xname(sc->sc_dev), __func__, page, regnum));
   12669 
   12670 	/*
    12671 	 * Steps 3 and 4: Access the PHY wakeup register, following the
    12672 	 * procedure described in the function header comment above.
   12673 	 */
   12674 
   12675 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12676 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12677 	if (rv != 0)
   12678 		return rv;
   12679 
   12680 	if (rd) {
   12681 		/* Read the Wakeup register page value using opcode 0x12 */
   12682 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12683 	} else {
   12684 		/* Write the Wakeup register page value using opcode 0x12 */
   12685 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12686 	}
   12687 	if (rv != 0)
   12688 		return rv;
   12689 
   12690 	if (!page_set)
   12691 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12692 
   12693 	return rv;
   12694 }
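/*
 * Usage sketch (not compiled): reading a register on the wakeup page
 * with page_set == false, so the helper above brackets the access with
 * wm_{enable,disable}_phy_wakeup_reg_access_bm() itself.  BM_PHY_REG()
 * is assumed here as a page/register encoder matching the
 * BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() decoders used above; register
 * number 1 is only an example.
 */
#if 0
	uint16_t wuc;
	int rv;

	/* The PHY semaphore must already be held by the caller. */
	rv = wm_access_phy_wakeup_reg_bm(dev, BM_PHY_REG(BM_WUC_PAGE, 1),
	    &wuc, true, false);
#endif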
   12695 
   12696 /*
   12697  * wm_gmii_hv_readreg:	[mii interface function]
   12698  *
    12699  *	Read a PHY register on the HV (PCH family) PHY.
   12700  * This could be handled by the PHY layer if we didn't have to lock the
   12701  * resource ...
   12702  */
   12703 static int
   12704 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12705 {
   12706 	struct wm_softc *sc = device_private(dev);
   12707 	int rv;
   12708 
   12709 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12710 		device_xname(dev), __func__));
   12711 
   12712 	rv = sc->phy.acquire(sc);
   12713 	if (rv != 0) {
   12714 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12715 		return rv;
   12716 	}
   12717 
   12718 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12719 	sc->phy.release(sc);
   12720 	return rv;
   12721 }
   12722 
   12723 static int
   12724 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12725 {
   12726 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12727 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12728 	int rv;
   12729 
   12730 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12731 
   12732 	/* Page 800 works differently than the rest so it has its own func */
   12733 	if (page == BM_WUC_PAGE)
   12734 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12735 
   12736 	/*
    12737 	 * Pages above 0 but below 768 work differently from the rest;
    12738 	 * they are not handled here yet.
   12739 	 */
   12740 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12741 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12742 		return -1;
   12743 	}
   12744 
   12745 	/*
   12746 	 * XXX I21[789] documents say that the SMBus Address register is at
   12747 	 * PHY address 01, Page 0 (not 768), Register 26.
   12748 	 */
   12749 	if (page == HV_INTC_FC_PAGE_START)
   12750 		page = 0;
   12751 
   12752 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12753 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12754 		    page << BME1000_PAGE_SHIFT);
   12755 		if (rv != 0)
   12756 			return rv;
   12757 	}
   12758 
   12759 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12760 }
   12761 
   12762 /*
   12763  * wm_gmii_hv_writereg:	[mii interface function]
   12764  *
    12765  *	Write a PHY register on the HV (PCH family) PHY.
   12766  * This could be handled by the PHY layer if we didn't have to lock the
   12767  * resource ...
   12768  */
   12769 static int
   12770 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12771 {
   12772 	struct wm_softc *sc = device_private(dev);
   12773 	int rv;
   12774 
   12775 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12776 		device_xname(dev), __func__));
   12777 
   12778 	rv = sc->phy.acquire(sc);
   12779 	if (rv != 0) {
   12780 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12781 		return rv;
   12782 	}
   12783 
   12784 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12785 	sc->phy.release(sc);
   12786 
   12787 	return rv;
   12788 }
   12789 
   12790 static int
   12791 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12792 {
   12793 	struct wm_softc *sc = device_private(dev);
   12794 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12795 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12796 	int rv;
   12797 
   12798 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12799 
   12800 	/* Page 800 works differently than the rest so it has its own func */
   12801 	if (page == BM_WUC_PAGE)
   12802 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12803 		    false);
   12804 
   12805 	/*
    12806 	 * Pages above 0 but below 768 work differently from the rest;
    12807 	 * they are not handled here yet.
   12808 	 */
   12809 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12810 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12811 		return -1;
   12812 	}
   12813 
   12814 	{
   12815 		/*
   12816 		 * XXX I21[789] documents say that the SMBus Address register
   12817 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12818 		 */
   12819 		if (page == HV_INTC_FC_PAGE_START)
   12820 			page = 0;
   12821 
   12822 		/*
   12823 		 * XXX Workaround MDIO accesses being disabled after entering
   12824 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12825 		 * register is set)
   12826 		 */
   12827 		if (sc->sc_phytype == WMPHY_82578) {
   12828 			struct mii_softc *child;
   12829 
   12830 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12831 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12832 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12833 			    && ((val & (1 << 11)) != 0)) {
   12834 				device_printf(dev, "XXX need workaround\n");
   12835 			}
   12836 		}
   12837 
   12838 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12839 			rv = wm_gmii_mdic_writereg(dev, 1,
   12840 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12841 			if (rv != 0)
   12842 				return rv;
   12843 		}
   12844 	}
   12845 
   12846 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12847 }
   12848 
   12849 /*
   12850  * wm_gmii_82580_readreg:	[mii interface function]
   12851  *
   12852  *	Read a PHY register on the 82580 and I350.
   12853  * This could be handled by the PHY layer if we didn't have to lock the
   12854  * resource ...
   12855  */
   12856 static int
   12857 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12858 {
   12859 	struct wm_softc *sc = device_private(dev);
   12860 	int rv;
   12861 
   12862 	rv = sc->phy.acquire(sc);
   12863 	if (rv != 0) {
   12864 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12865 		return rv;
   12866 	}
   12867 
   12868 #ifdef DIAGNOSTIC
   12869 	if (reg > MII_ADDRMASK) {
   12870 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12871 		    __func__, sc->sc_phytype, reg);
   12872 		reg &= MII_ADDRMASK;
   12873 	}
   12874 #endif
   12875 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12876 
   12877 	sc->phy.release(sc);
   12878 	return rv;
   12879 }
   12880 
   12881 /*
   12882  * wm_gmii_82580_writereg:	[mii interface function]
   12883  *
   12884  *	Write a PHY register on the 82580 and I350.
   12885  * This could be handled by the PHY layer if we didn't have to lock the
   12886  * resource ...
   12887  */
   12888 static int
   12889 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12890 {
   12891 	struct wm_softc *sc = device_private(dev);
   12892 	int rv;
   12893 
   12894 	rv = sc->phy.acquire(sc);
   12895 	if (rv != 0) {
   12896 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12897 		return rv;
   12898 	}
   12899 
   12900 #ifdef DIAGNOSTIC
   12901 	if (reg > MII_ADDRMASK) {
   12902 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12903 		    __func__, sc->sc_phytype, reg);
   12904 		reg &= MII_ADDRMASK;
   12905 	}
   12906 #endif
   12907 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12908 
   12909 	sc->phy.release(sc);
   12910 	return rv;
   12911 }
   12912 
   12913 /*
   12914  * wm_gmii_gs40g_readreg:	[mii interface function]
   12915  *
    12916  *	Read a PHY register on the I210 and I211.
   12917  * This could be handled by the PHY layer if we didn't have to lock the
   12918  * resource ...
   12919  */
   12920 static int
   12921 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12922 {
   12923 	struct wm_softc *sc = device_private(dev);
   12924 	int page, offset;
   12925 	int rv;
   12926 
   12927 	/* Acquire semaphore */
   12928 	rv = sc->phy.acquire(sc);
   12929 	if (rv != 0) {
   12930 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12931 		return rv;
   12932 	}
   12933 
   12934 	/* Page select */
   12935 	page = reg >> GS40G_PAGE_SHIFT;
   12936 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12937 	if (rv != 0)
   12938 		goto release;
   12939 
   12940 	/* Read reg */
   12941 	offset = reg & GS40G_OFFSET_MASK;
   12942 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12943 
   12944 release:
   12945 	sc->phy.release(sc);
   12946 	return rv;
   12947 }
   12948 
   12949 /*
   12950  * wm_gmii_gs40g_writereg:	[mii interface function]
   12951  *
   12952  *	Write a PHY register on the I210 and I211.
   12953  * This could be handled by the PHY layer if we didn't have to lock the
   12954  * resource ...
   12955  */
   12956 static int
   12957 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12958 {
   12959 	struct wm_softc *sc = device_private(dev);
   12960 	uint16_t page;
   12961 	int offset, rv;
   12962 
   12963 	/* Acquire semaphore */
   12964 	rv = sc->phy.acquire(sc);
   12965 	if (rv != 0) {
   12966 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12967 		return rv;
   12968 	}
   12969 
   12970 	/* Page select */
   12971 	page = reg >> GS40G_PAGE_SHIFT;
   12972 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12973 	if (rv != 0)
   12974 		goto release;
   12975 
   12976 	/* Write reg */
   12977 	offset = reg & GS40G_OFFSET_MASK;
   12978 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12979 
   12980 release:
   12981 	/* Release semaphore */
   12982 	sc->phy.release(sc);
   12983 	return rv;
   12984 }
   12985 
   12986 /*
   12987  * wm_gmii_statchg:	[mii interface function]
   12988  *
   12989  *	Callback from MII layer when media changes.
   12990  */
   12991 static void
   12992 wm_gmii_statchg(struct ifnet *ifp)
   12993 {
   12994 	struct wm_softc *sc = ifp->if_softc;
   12995 	struct mii_data *mii = &sc->sc_mii;
   12996 
   12997 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12998 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12999 	sc->sc_fcrtl &= ~FCRTL_XONE;
   13000 
   13001 	/* Get flow control negotiation result. */
   13002 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   13003 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   13004 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   13005 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   13006 	}
   13007 
   13008 	if (sc->sc_flowflags & IFM_FLOW) {
   13009 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   13010 			sc->sc_ctrl |= CTRL_TFCE;
   13011 			sc->sc_fcrtl |= FCRTL_XONE;
   13012 		}
   13013 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   13014 			sc->sc_ctrl |= CTRL_RFCE;
   13015 	}
   13016 
   13017 	if (mii->mii_media_active & IFM_FDX) {
   13018 		DPRINTF(sc, WM_DEBUG_LINK,
   13019 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   13020 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13021 	} else {
   13022 		DPRINTF(sc, WM_DEBUG_LINK,
   13023 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   13024 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13025 	}
   13026 
   13027 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13028 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13029 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13030 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13031 	if (sc->sc_type == WM_T_80003) {
   13032 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   13033 		case IFM_1000_T:
   13034 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   13035 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    13036 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   13037 			break;
   13038 		default:
   13039 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   13040 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    13041 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   13042 			break;
   13043 		}
   13044 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   13045 	}
   13046 }
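/*
 * Illustration (not compiled): the negotiated pause flags map onto the
 * MAC as coded above: IFM_ETH_TXPAUSE asserts CTRL_TFCE plus
 * FCRTL_XONE, and IFM_ETH_RXPAUSE asserts CTRL_RFCE.  For symmetric
 * flow control:
 */
#if 0
	sc->sc_flowflags = IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	/* After wm_gmii_statchg() runs, CTRL carries TFCE | RFCE and
	 * FCRTL carries XONE. */
#endif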
   13047 
   13048 /* kumeran related (80003, ICH* and PCH*) */
   13049 
   13050 /*
   13051  * wm_kmrn_readreg:
   13052  *
   13053  *	Read a kumeran register
   13054  */
   13055 static int
   13056 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   13057 {
   13058 	int rv;
   13059 
   13060 	if (sc->sc_type == WM_T_80003)
   13061 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13062 	else
   13063 		rv = sc->phy.acquire(sc);
   13064 	if (rv != 0) {
   13065 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13066 		    __func__);
   13067 		return rv;
   13068 	}
   13069 
   13070 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   13071 
   13072 	if (sc->sc_type == WM_T_80003)
   13073 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13074 	else
   13075 		sc->phy.release(sc);
   13076 
   13077 	return rv;
   13078 }
   13079 
   13080 static int
   13081 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   13082 {
   13083 
   13084 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13085 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   13086 	    KUMCTRLSTA_REN);
   13087 	CSR_WRITE_FLUSH(sc);
   13088 	delay(2);
   13089 
   13090 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   13091 
   13092 	return 0;
   13093 }
   13094 
   13095 /*
   13096  * wm_kmrn_writereg:
   13097  *
   13098  *	Write a kumeran register
   13099  */
   13100 static int
   13101 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   13102 {
   13103 	int rv;
   13104 
   13105 	if (sc->sc_type == WM_T_80003)
   13106 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13107 	else
   13108 		rv = sc->phy.acquire(sc);
   13109 	if (rv != 0) {
   13110 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13111 		    __func__);
   13112 		return rv;
   13113 	}
   13114 
   13115 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   13116 
   13117 	if (sc->sc_type == WM_T_80003)
   13118 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13119 	else
   13120 		sc->phy.release(sc);
   13121 
   13122 	return rv;
   13123 }
   13124 
   13125 static int
   13126 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   13127 {
   13128 
   13129 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13130 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   13131 
   13132 	return 0;
   13133 }
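/*
 * Usage sketch (not compiled): a kumeran access is a single KUMCTRLSTA
 * transaction: the register offset goes in KUMCTRLSTA_OFFSET,
 * KUMCTRLSTA_REN selects a read, and the data travels in the low 16
 * bits.  E.g. fetching the half-duplex control word that
 * wm_gmii_statchg() programs:
 */
#if 0
	uint16_t hd_ctrl;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd_ctrl) == 0)
		device_printf(sc->sc_dev, "HD_CTRL = 0x%04hx\n", hd_ctrl);
#endif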
   13134 
   13135 /*
   13136  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   13137  * This access method is different from IEEE MMD.
   13138  */
   13139 static int
   13140 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   13141 {
   13142 	struct wm_softc *sc = device_private(dev);
   13143 	int rv;
   13144 
   13145 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   13146 	if (rv != 0)
   13147 		return rv;
   13148 
   13149 	if (rd)
   13150 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   13151 	else
   13152 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   13153 	return rv;
   13154 }
   13155 
   13156 static int
   13157 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   13158 {
   13159 
   13160 	return wm_access_emi_reg_locked(dev, reg, val, true);
   13161 }
   13162 
   13163 static int
   13164 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13165 {
   13166 
   13167 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13168 }
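/*
 * Usage sketch (not compiled): an EMI access is an indirect transfer
 * through PHY address 2: the EMI register number is written to
 * I82579_EMI_ADDR and the payload then moves through I82579_EMI_DATA.
 * Callers hold the PHY semaphore; EMI_REG below is a hypothetical
 * register number, not a real definition:
 */
#if 0
	uint16_t data;
	int rv;

	if (sc->phy.acquire(sc) == 0) {
		rv = wm_read_emi_reg_locked(dev, EMI_REG, &data);
		sc->phy.release(sc);
	}
#endif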
   13169 
   13170 /* SGMII related */
   13171 
   13172 /*
   13173  * wm_sgmii_uses_mdio
   13174  *
   13175  * Check whether the transaction is to the internal PHY or the external
   13176  * MDIO interface. Return true if it's MDIO.
   13177  */
   13178 static bool
   13179 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13180 {
   13181 	uint32_t reg;
   13182 	bool ismdio = false;
   13183 
   13184 	switch (sc->sc_type) {
   13185 	case WM_T_82575:
   13186 	case WM_T_82576:
   13187 		reg = CSR_READ(sc, WMREG_MDIC);
   13188 		ismdio = ((reg & MDIC_DEST) != 0);
   13189 		break;
   13190 	case WM_T_82580:
   13191 	case WM_T_I350:
   13192 	case WM_T_I354:
   13193 	case WM_T_I210:
   13194 	case WM_T_I211:
   13195 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13196 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13197 		break;
   13198 	default:
   13199 		break;
   13200 	}
   13201 
   13202 	return ismdio;
   13203 }
   13204 
   13205 /* Setup internal SGMII PHY for SFP */
   13206 static void
   13207 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13208 {
   13209 	uint16_t id1, id2, phyreg;
   13210 	int i, rv;
   13211 
   13212 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13213 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13214 		return;
   13215 
   13216 	for (i = 0; i < MII_NPHY; i++) {
   13217 		sc->phy.no_errprint = true;
   13218 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13219 		if (rv != 0)
   13220 			continue;
   13221 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13222 		if (rv != 0)
   13223 			continue;
   13224 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13225 			continue;
   13226 		sc->phy.no_errprint = false;
   13227 
   13228 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13229 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13230 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13231 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13232 		break;
   13233 	}
   13235 }
   13236 
   13237 /*
   13238  * wm_sgmii_readreg:	[mii interface function]
   13239  *
   13240  *	Read a PHY register on the SGMII
   13241  * This could be handled by the PHY layer if we didn't have to lock the
   13242  * resource ...
   13243  */
   13244 static int
   13245 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13246 {
   13247 	struct wm_softc *sc = device_private(dev);
   13248 	int rv;
   13249 
   13250 	rv = sc->phy.acquire(sc);
   13251 	if (rv != 0) {
   13252 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13253 		return rv;
   13254 	}
   13255 
   13256 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13257 
   13258 	sc->phy.release(sc);
   13259 	return rv;
   13260 }
   13261 
   13262 static int
   13263 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13264 {
   13265 	struct wm_softc *sc = device_private(dev);
   13266 	uint32_t i2ccmd;
   13267 	int i, rv = 0;
   13268 
   13269 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13270 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13271 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13272 
   13273 	/* Poll the ready bit */
   13274 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13275 		delay(50);
   13276 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13277 		if (i2ccmd & I2CCMD_READY)
   13278 			break;
   13279 	}
   13280 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13281 		device_printf(dev, "I2CCMD Read did not complete\n");
   13282 		rv = ETIMEDOUT;
   13283 	}
   13284 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13285 		if (!sc->phy.no_errprint)
   13286 			device_printf(dev, "I2CCMD Error bit set\n");
   13287 		rv = EIO;
   13288 	}
   13289 
   13290 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13291 
   13292 	return rv;
   13293 }
   13294 
   13295 /*
   13296  * wm_sgmii_writereg:	[mii interface function]
   13297  *
   13298  *	Write a PHY register on the SGMII.
   13299  * This could be handled by the PHY layer if we didn't have to lock the
   13300  * resource ...
   13301  */
   13302 static int
   13303 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13304 {
   13305 	struct wm_softc *sc = device_private(dev);
   13306 	int rv;
   13307 
   13308 	rv = sc->phy.acquire(sc);
   13309 	if (rv != 0) {
   13310 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13311 		return rv;
   13312 	}
   13313 
   13314 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13315 
   13316 	sc->phy.release(sc);
   13317 
   13318 	return rv;
   13319 }
   13320 
   13321 static int
   13322 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13323 {
   13324 	struct wm_softc *sc = device_private(dev);
   13325 	uint32_t i2ccmd;
   13326 	uint16_t swapdata;
   13327 	int rv = 0;
   13328 	int i;
   13329 
   13330 	/* Swap the data bytes for the I2C interface */
   13331 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13332 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13333 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13334 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13335 
   13336 	/* Poll the ready bit */
   13337 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13338 		delay(50);
   13339 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13340 		if (i2ccmd & I2CCMD_READY)
   13341 			break;
   13342 	}
   13343 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13344 		device_printf(dev, "I2CCMD Write did not complete\n");
   13345 		rv = ETIMEDOUT;
   13346 	}
   13347 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13348 		device_printf(dev, "I2CCMD Error bit set\n");
   13349 		rv = EIO;
   13350 	}
   13351 
   13352 	return rv;
   13353 }
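/*
 * Illustration (not compiled): I2CCMD carries PHY data with the bytes
 * swapped, so both paths above reverse them.  Writing 0x1234 places
 * 0x3412 in the low 16 bits of I2CCMD, and the read path applies the
 * same swap to restore the original value:
 */
#if 0
	uint16_t val = 0x1234;
	uint16_t swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
	/* swapdata == 0x3412; swapping again yields 0x1234. */
#endif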
   13354 
   13355 /* TBI related */
   13356 
   13357 static bool
   13358 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13359 {
   13360 	bool sig;
   13361 
   13362 	sig = ctrl & CTRL_SWDPIN(1);
   13363 
   13364 	/*
   13365 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13366 	 * detect a signal, 1 if they don't.
   13367 	 */
   13368 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13369 		sig = !sig;
   13370 
   13371 	return sig;
   13372 }
   13373 
   13374 /*
   13375  * wm_tbi_mediainit:
   13376  *
   13377  *	Initialize media for use on 1000BASE-X devices.
   13378  */
   13379 static void
   13380 wm_tbi_mediainit(struct wm_softc *sc)
   13381 {
   13382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13383 	const char *sep = "";
   13384 
   13385 	if (sc->sc_type < WM_T_82543)
   13386 		sc->sc_tipg = TIPG_WM_DFLT;
   13387 	else
   13388 		sc->sc_tipg = TIPG_LG_DFLT;
   13389 
   13390 	sc->sc_tbi_serdes_anegticks = 5;
   13391 
   13392 	/* Initialize our media structures */
   13393 	sc->sc_mii.mii_ifp = ifp;
   13394 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13395 
   13396 	ifp->if_baudrate = IF_Gbps(1);
   13397 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13398 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13399 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13400 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13401 		    sc->sc_core_lock);
   13402 	} else {
   13403 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13404 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13405 	}
   13406 
   13407 	/*
   13408 	 * SWD Pins:
   13409 	 *
   13410 	 *	0 = Link LED (output)
   13411 	 *	1 = Loss Of Signal (input)
   13412 	 */
   13413 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13414 
   13415 	/* XXX Perhaps this is only for TBI */
   13416 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13417 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13418 
   13419 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13420 		sc->sc_ctrl &= ~CTRL_LRST;
   13421 
   13422 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13423 
   13424 #define	ADD(ss, mm, dd)							  \
   13425 do {									  \
   13426 	aprint_normal("%s%s", sep, ss);					  \
   13427 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13428 	sep = ", ";							  \
   13429 } while (/*CONSTCOND*/0)
   13430 
   13431 	aprint_normal_dev(sc->sc_dev, "");
   13432 
   13433 	if (sc->sc_type == WM_T_I354) {
   13434 		uint32_t status;
   13435 
   13436 		status = CSR_READ(sc, WMREG_STATUS);
   13437 		if (((status & STATUS_2P5_SKU) != 0)
   13438 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13439 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   13440 		} else
   13441 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   13442 	} else if (sc->sc_type == WM_T_82545) {
   13443 		/* Only 82545 is LX (XXX except SFP) */
   13444 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13445 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13446 	} else if (sc->sc_sfptype != 0) {
   13447 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13448 		switch (sc->sc_sfptype) {
   13449 		default:
   13450 		case SFF_SFP_ETH_FLAGS_1000SX:
   13451 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13452 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13453 			break;
   13454 		case SFF_SFP_ETH_FLAGS_1000LX:
   13455 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13456 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13457 			break;
   13458 		case SFF_SFP_ETH_FLAGS_1000CX:
   13459 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13460 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13461 			break;
   13462 		case SFF_SFP_ETH_FLAGS_1000T:
   13463 			ADD("1000baseT", IFM_1000_T, 0);
   13464 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13465 			break;
   13466 		case SFF_SFP_ETH_FLAGS_100FX:
   13467 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13468 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13469 			break;
   13470 		}
   13471 	} else {
   13472 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13473 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13474 	}
   13475 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13476 	aprint_normal("\n");
   13477 
   13478 #undef ADD
   13479 
   13480 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13481 }
   13482 
   13483 /*
   13484  * wm_tbi_mediachange:	[ifmedia interface function]
   13485  *
   13486  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13487  */
   13488 static int
   13489 wm_tbi_mediachange(struct ifnet *ifp)
   13490 {
   13491 	struct wm_softc *sc = ifp->if_softc;
   13492 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13493 	uint32_t status, ctrl;
   13494 	bool signal;
   13495 	int i;
   13496 
   13497 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13498 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13499 		/* XXX need some work for >= 82571 and < 82575 */
   13500 		if (sc->sc_type < WM_T_82575)
   13501 			return 0;
   13502 	}
   13503 
   13504 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13505 	    || (sc->sc_type >= WM_T_82575))
   13506 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13507 
   13508 	sc->sc_ctrl &= ~CTRL_LRST;
   13509 	sc->sc_txcw = TXCW_ANE;
   13510 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13511 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13512 	else if (ife->ifm_media & IFM_FDX)
   13513 		sc->sc_txcw |= TXCW_FD;
   13514 	else
   13515 		sc->sc_txcw |= TXCW_HD;
   13516 
   13517 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13518 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13519 
   13520 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   13521 		device_xname(sc->sc_dev), sc->sc_txcw));
   13522 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13523 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13524 	CSR_WRITE_FLUSH(sc);
   13525 	delay(1000);
   13526 
   13527 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13528 	signal = wm_tbi_havesignal(sc, ctrl);
   13529 
   13530 	DPRINTF(sc, WM_DEBUG_LINK,
   13531 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13532 
   13533 	if (signal) {
   13534 		/* Have signal; wait for the link to come up. */
   13535 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13536 			delay(10000);
   13537 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13538 				break;
   13539 		}
   13540 
   13541 		DPRINTF(sc, WM_DEBUG_LINK,
   13542 		    ("%s: i = %d after waiting for link\n",
   13543 			device_xname(sc->sc_dev), i));
   13544 
   13545 		status = CSR_READ(sc, WMREG_STATUS);
   13546 		DPRINTF(sc, WM_DEBUG_LINK,
   13547 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13548 			__PRIxBIT "\n",
   13549 			device_xname(sc->sc_dev), status, STATUS_LU));
   13550 		if (status & STATUS_LU) {
   13551 			/* Link is up. */
   13552 			DPRINTF(sc, WM_DEBUG_LINK,
   13553 			    ("%s: LINK: set media -> link up %s\n",
   13554 				device_xname(sc->sc_dev),
   13555 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13556 
   13557 			/*
    13558 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    13559 			 * automatically, so we should update sc->sc_ctrl.
   13560 			 */
   13561 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13562 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13563 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13564 			if (status & STATUS_FD)
   13565 				sc->sc_tctl |=
   13566 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13567 			else
   13568 				sc->sc_tctl |=
   13569 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13570 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13571 				sc->sc_fcrtl |= FCRTL_XONE;
   13572 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13573 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13574 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13575 			sc->sc_tbi_linkup = 1;
   13576 		} else {
   13577 			if (i == WM_LINKUP_TIMEOUT)
   13578 				wm_check_for_link(sc);
   13579 			/* Link is down. */
   13580 			DPRINTF(sc, WM_DEBUG_LINK,
   13581 			    ("%s: LINK: set media -> link down\n",
   13582 				device_xname(sc->sc_dev)));
   13583 			sc->sc_tbi_linkup = 0;
   13584 		}
   13585 	} else {
   13586 		DPRINTF(sc, WM_DEBUG_LINK,
   13587 		    ("%s: LINK: set media -> no signal\n",
   13588 			device_xname(sc->sc_dev)));
   13589 		sc->sc_tbi_linkup = 0;
   13590 	}
   13591 
   13592 	wm_tbi_serdes_set_linkled(sc);
   13593 
   13594 	return 0;
   13595 }
   13596 
   13597 /*
   13598  * wm_tbi_mediastatus:	[ifmedia interface function]
   13599  *
   13600  *	Get the current interface media status on a 1000BASE-X device.
   13601  */
   13602 static void
   13603 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13604 {
   13605 	struct wm_softc *sc = ifp->if_softc;
   13606 	uint32_t ctrl, status;
   13607 
   13608 	ifmr->ifm_status = IFM_AVALID;
   13609 	ifmr->ifm_active = IFM_ETHER;
   13610 
   13611 	status = CSR_READ(sc, WMREG_STATUS);
   13612 	if ((status & STATUS_LU) == 0) {
   13613 		ifmr->ifm_active |= IFM_NONE;
   13614 		return;
   13615 	}
   13616 
   13617 	ifmr->ifm_status |= IFM_ACTIVE;
   13618 	/* Only 82545 is LX */
   13619 	if (sc->sc_type == WM_T_82545)
   13620 		ifmr->ifm_active |= IFM_1000_LX;
   13621 	else
   13622 		ifmr->ifm_active |= IFM_1000_SX;
   13623 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13624 		ifmr->ifm_active |= IFM_FDX;
   13625 	else
   13626 		ifmr->ifm_active |= IFM_HDX;
   13627 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13628 	if (ctrl & CTRL_RFCE)
   13629 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13630 	if (ctrl & CTRL_TFCE)
   13631 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13632 }
   13633 
   13634 /* XXX TBI only */
   13635 static int
   13636 wm_check_for_link(struct wm_softc *sc)
   13637 {
   13638 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13639 	uint32_t rxcw;
   13640 	uint32_t ctrl;
   13641 	uint32_t status;
   13642 	bool signal;
   13643 
   13644 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13645 		device_xname(sc->sc_dev), __func__));
   13646 
   13647 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13648 		/* XXX need some work for >= 82571 */
   13649 		if (sc->sc_type >= WM_T_82571) {
   13650 			sc->sc_tbi_linkup = 1;
   13651 			return 0;
   13652 		}
   13653 	}
   13654 
   13655 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13656 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13657 	status = CSR_READ(sc, WMREG_STATUS);
   13658 	signal = wm_tbi_havesignal(sc, ctrl);
   13659 
   13660 	DPRINTF(sc, WM_DEBUG_LINK,
   13661 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13662 		device_xname(sc->sc_dev), __func__, signal,
   13663 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13664 
   13665 	/*
   13666 	 * SWDPIN   LU RXCW
   13667 	 *	0    0	  0
   13668 	 *	0    0	  1	(should not happen)
   13669 	 *	0    1	  0	(should not happen)
   13670 	 *	0    1	  1	(should not happen)
   13671 	 *	1    0	  0	Disable autonego and force linkup
   13672 	 *	1    0	  1	got /C/ but not linkup yet
   13673 	 *	1    1	  0	(linkup)
   13674 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13675 	 *
   13676 	 */
   13677 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13678 		DPRINTF(sc, WM_DEBUG_LINK,
   13679 		    ("%s: %s: force linkup and fullduplex\n",
   13680 			device_xname(sc->sc_dev), __func__));
   13681 		sc->sc_tbi_linkup = 0;
   13682 		/* Disable auto-negotiation in the TXCW register */
   13683 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13684 
   13685 		/*
   13686 		 * Force link-up and also force full-duplex.
   13687 		 *
    13688 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
    13689 		 * automatically, so we should update sc->sc_ctrl.
   13690 		 */
   13691 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13692 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13693 	} else if (((status & STATUS_LU) != 0)
   13694 	    && ((rxcw & RXCW_C) != 0)
   13695 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13696 		sc->sc_tbi_linkup = 1;
   13697 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13698 			device_xname(sc->sc_dev), __func__));
   13699 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13700 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13701 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13702 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13703 			device_xname(sc->sc_dev), __func__));
   13704 	} else {
   13705 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13706 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13707 			status));
   13708 	}
   13709 
   13710 	return 0;
   13711 }
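/*
 * Worked example (not compiled) of one row of the SWDPIN/LU/RXCW table
 * above: with signal present, link not up and no /C/ ordered sets
 * received, the link partner is not autonegotiating, so the code
 * disables autonegotiation and forces the link up:
 */
#if 0
	/* signal == true, (status & STATUS_LU) == 0, (rxcw & RXCW_C) == 0 */
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw & ~TXCW_ANE);
	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU | CTRL_FD);
#endif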
   13712 
   13713 /*
   13714  * wm_tbi_tick:
   13715  *
   13716  *	Check the link on TBI devices.
   13717  *	This function acts as mii_tick().
   13718  */
   13719 static void
   13720 wm_tbi_tick(struct wm_softc *sc)
   13721 {
   13722 	struct mii_data *mii = &sc->sc_mii;
   13723 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13724 	uint32_t status;
   13725 
   13726 	KASSERT(mutex_owned(sc->sc_core_lock));
   13727 
   13728 	status = CSR_READ(sc, WMREG_STATUS);
   13729 
   13730 	/* XXX is this needed? */
   13731 	(void)CSR_READ(sc, WMREG_RXCW);
   13732 	(void)CSR_READ(sc, WMREG_CTRL);
   13733 
   13734 	/* set link status */
   13735 	if ((status & STATUS_LU) == 0) {
   13736 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13737 			device_xname(sc->sc_dev)));
   13738 		sc->sc_tbi_linkup = 0;
   13739 	} else if (sc->sc_tbi_linkup == 0) {
   13740 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13741 			device_xname(sc->sc_dev),
   13742 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13743 		sc->sc_tbi_linkup = 1;
   13744 		sc->sc_tbi_serdes_ticks = 0;
   13745 	}
   13746 
   13747 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13748 		goto setled;
   13749 
   13750 	if ((status & STATUS_LU) == 0) {
   13751 		sc->sc_tbi_linkup = 0;
   13752 		/* If the timer expired, retry autonegotiation */
   13753 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13754 		    && (++sc->sc_tbi_serdes_ticks
   13755 			>= sc->sc_tbi_serdes_anegticks)) {
   13756 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13757 				device_xname(sc->sc_dev), __func__));
   13758 			sc->sc_tbi_serdes_ticks = 0;
   13759 			/*
   13760 			 * Reset the link, and let autonegotiation do
   13761 			 * its thing
   13762 			 */
   13763 			sc->sc_ctrl |= CTRL_LRST;
   13764 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13765 			CSR_WRITE_FLUSH(sc);
   13766 			delay(1000);
   13767 			sc->sc_ctrl &= ~CTRL_LRST;
   13768 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13769 			CSR_WRITE_FLUSH(sc);
   13770 			delay(1000);
   13771 			CSR_WRITE(sc, WMREG_TXCW,
   13772 			    sc->sc_txcw & ~TXCW_ANE);
   13773 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13774 		}
   13775 	}
   13776 
   13777 setled:
   13778 	wm_tbi_serdes_set_linkled(sc);
   13779 }
   13780 
   13781 /* SERDES related */
   13782 static void
   13783 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13784 {
   13785 	uint32_t reg;
   13786 
   13787 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13788 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13789 		return;
   13790 
   13791 	/* Enable PCS to turn on link */
   13792 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13793 	reg |= PCS_CFG_PCS_EN;
   13794 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13795 
   13796 	/* Power up the laser */
   13797 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13798 	reg &= ~CTRL_EXT_SWDPIN(3);
   13799 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13800 
   13801 	/* Flush the write to verify completion */
   13802 	CSR_WRITE_FLUSH(sc);
   13803 	delay(1000);
   13804 }
   13805 
   13806 static int
   13807 wm_serdes_mediachange(struct ifnet *ifp)
   13808 {
   13809 	struct wm_softc *sc = ifp->if_softc;
   13810 	bool pcs_autoneg = true; /* XXX */
   13811 	uint32_t ctrl_ext, pcs_lctl, reg;
   13812 
   13813 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13814 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13815 		return 0;
   13816 
   13817 	/* XXX Currently, this function is not called on 8257[12] */
   13818 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13819 	    || (sc->sc_type >= WM_T_82575))
   13820 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13821 
   13822 	/* Power on the sfp cage if present */
   13823 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13824 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13825 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13826 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13827 
   13828 	sc->sc_ctrl |= CTRL_SLU;
   13829 
   13830 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13831 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13832 
   13833 		reg = CSR_READ(sc, WMREG_CONNSW);
   13834 		reg |= CONNSW_ENRGSRC;
   13835 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13836 	}
   13837 
   13838 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13839 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13840 	case CTRL_EXT_LINK_MODE_SGMII:
   13841 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13842 		pcs_autoneg = true;
   13843 		/* Autoneg time out should be disabled for SGMII mode */
   13844 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13845 		break;
   13846 	case CTRL_EXT_LINK_MODE_1000KX:
   13847 		pcs_autoneg = false;
   13848 		/* FALLTHROUGH */
   13849 	default:
   13850 		if ((sc->sc_type == WM_T_82575)
   13851 		    || (sc->sc_type == WM_T_82576)) {
   13852 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13853 				pcs_autoneg = false;
   13854 		}
   13855 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13856 		    | CTRL_FRCFDX;
   13857 
   13858 		/* Set speed of 1000/Full if speed/duplex is forced */
   13859 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13860 	}
   13861 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13862 
   13863 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13864 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13865 
   13866 	if (pcs_autoneg) {
   13867 		/* Set PCS register for autoneg */
   13868 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13869 
   13870 		/* Disable force flow control for autoneg */
   13871 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13872 
   13873 		/* Configure flow control advertisement for autoneg */
   13874 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13875 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13876 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13877 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13878 	} else
   13879 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13880 
   13881 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13882 
   13883 	return 0;
   13884 }
   13885 
   13886 static void
   13887 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13888 {
   13889 	struct wm_softc *sc = ifp->if_softc;
   13890 	struct mii_data *mii = &sc->sc_mii;
   13891 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13892 	uint32_t pcs_adv, pcs_lpab, reg;
   13893 
   13894 	ifmr->ifm_status = IFM_AVALID;
   13895 	ifmr->ifm_active = IFM_ETHER;
   13896 
   13897 	/* Check PCS */
   13898 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13899 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13900 		ifmr->ifm_active |= IFM_NONE;
   13901 		sc->sc_tbi_linkup = 0;
   13902 		goto setled;
   13903 	}
   13904 
   13905 	sc->sc_tbi_linkup = 1;
   13906 	ifmr->ifm_status |= IFM_ACTIVE;
   13907 	if (sc->sc_type == WM_T_I354) {
   13908 		uint32_t status;
   13909 
   13910 		status = CSR_READ(sc, WMREG_STATUS);
   13911 		if (((status & STATUS_2P5_SKU) != 0)
   13912 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13913 			ifmr->ifm_active |= IFM_2500_KX;
   13914 		} else
   13915 			ifmr->ifm_active |= IFM_1000_KX;
   13916 	} else {
   13917 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13918 		case PCS_LSTS_SPEED_10:
   13919 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13920 			break;
   13921 		case PCS_LSTS_SPEED_100:
   13922 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13923 			break;
   13924 		case PCS_LSTS_SPEED_1000:
   13925 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13926 			break;
   13927 		default:
   13928 			device_printf(sc->sc_dev, "Unknown speed\n");
   13929 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13930 			break;
   13931 		}
   13932 	}
   13933 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13934 	if ((reg & PCS_LSTS_FDX) != 0)
   13935 		ifmr->ifm_active |= IFM_FDX;
   13936 	else
   13937 		ifmr->ifm_active |= IFM_HDX;
   13938 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13939 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13940 		/* Check flow */
   13941 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13942 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13943 			DPRINTF(sc, WM_DEBUG_LINK,
   13944 			    ("XXX LINKOK but not ACOMP\n"));
   13945 			goto setled;
   13946 		}
   13947 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13948 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13949 		DPRINTF(sc, WM_DEBUG_LINK,
   13950 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13951 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13952 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13953 			mii->mii_media_active |= IFM_FLOW
   13954 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13955 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13956 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13957 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13958 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13959 			mii->mii_media_active |= IFM_FLOW
   13960 			    | IFM_ETH_TXPAUSE;
   13961 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13962 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13963 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13964 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13965 			mii->mii_media_active |= IFM_FLOW
   13966 			    | IFM_ETH_RXPAUSE;
   13967 		}
   13968 	}
   13969 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13970 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13971 setled:
   13972 	wm_tbi_serdes_set_linkled(sc);
   13973 }
   13974 
   13975 /*
   13976  * wm_serdes_tick:
   13977  *
   13978  *	Check the link on serdes devices.
   13979  */
   13980 static void
   13981 wm_serdes_tick(struct wm_softc *sc)
   13982 {
   13983 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13984 	struct mii_data *mii = &sc->sc_mii;
   13985 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13986 	uint32_t reg;
   13987 
   13988 	KASSERT(mutex_owned(sc->sc_core_lock));
   13989 
   13990 	mii->mii_media_status = IFM_AVALID;
   13991 	mii->mii_media_active = IFM_ETHER;
   13992 
   13993 	/* Check PCS */
   13994 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13995 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13996 		mii->mii_media_status |= IFM_ACTIVE;
   13997 		sc->sc_tbi_linkup = 1;
   13998 		sc->sc_tbi_serdes_ticks = 0;
   13999 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   14000 		if ((reg & PCS_LSTS_FDX) != 0)
   14001 			mii->mii_media_active |= IFM_FDX;
   14002 		else
   14003 			mii->mii_media_active |= IFM_HDX;
   14004 	} else {
   14005 		mii->mii_media_status |= IFM_NONE;
   14006 		sc->sc_tbi_linkup = 0;
   14007 		/* If the timer expired, retry autonegotiation */
   14008 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   14009 		    && (++sc->sc_tbi_serdes_ticks
   14010 			>= sc->sc_tbi_serdes_anegticks)) {
   14011 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   14012 				device_xname(sc->sc_dev), __func__));
   14013 			sc->sc_tbi_serdes_ticks = 0;
   14014 			/* XXX */
   14015 			wm_serdes_mediachange(ifp);
   14016 		}
   14017 	}
   14018 
   14019 	wm_tbi_serdes_set_linkled(sc);
   14020 }
   14021 
   14022 /* SFP related */
   14023 
   14024 static int
   14025 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   14026 {
   14027 	uint32_t i2ccmd;
   14028 	int i;
   14029 
   14030 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   14031 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   14032 
   14033 	/* Poll the ready bit */
   14034 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   14035 		delay(50);
   14036 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   14037 		if (i2ccmd & I2CCMD_READY)
   14038 			break;
   14039 	}
   14040 	if ((i2ccmd & I2CCMD_READY) == 0)
   14041 		return -1;
   14042 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   14043 		return -1;
   14044 
   14045 	*data = i2ccmd & 0x00ff;
   14046 
   14047 	return 0;
   14048 }
   14049 
   14050 static uint32_t
   14051 wm_sfp_get_media_type(struct wm_softc *sc)
   14052 {
   14053 	uint32_t ctrl_ext;
   14054 	uint8_t val = 0;
   14055 	int timeout = 3;
   14056 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   14057 	int rv = -1;
   14058 
   14059 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14060 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   14061 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   14062 	CSR_WRITE_FLUSH(sc);
   14063 
   14064 	/* Read SFP module data */
   14065 	while (timeout) {
   14066 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   14067 		if (rv == 0)
   14068 			break;
   14069 		delay(100*1000); /* XXX too big */
   14070 		timeout--;
   14071 	}
   14072 	if (rv != 0)
   14073 		goto out;
   14074 
   14075 	switch (val) {
   14076 	case SFF_SFP_ID_SFF:
   14077 		aprint_normal_dev(sc->sc_dev,
   14078 		    "Module/Connector soldered to board\n");
   14079 		break;
   14080 	case SFF_SFP_ID_SFP:
   14081 		sc->sc_flags |= WM_F_SFP;
   14082 		break;
   14083 	case SFF_SFP_ID_UNKNOWN:
   14084 		goto out;
   14085 	default:
   14086 		break;
   14087 	}
   14088 
   14089 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   14090 	if (rv != 0)
   14091 		goto out;
   14092 
   14093 	sc->sc_sfptype = val;
   14094 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   14095 		mediatype = WM_MEDIATYPE_SERDES;
   14096 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   14097 		sc->sc_flags |= WM_F_SGMII;
   14098 		mediatype = WM_MEDIATYPE_COPPER;
   14099 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   14100 		sc->sc_flags |= WM_F_SGMII;
   14101 		mediatype = WM_MEDIATYPE_SERDES;
   14102 	} else {
   14103 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   14104 		    __func__, sc->sc_sfptype);
   14105 		sc->sc_sfptype = 0; /* XXX unknown */
   14106 	}
   14107 
   14108 out:
   14109 	/* Restore I2C interface setting */
   14110 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14111 
   14112 	return mediatype;
   14113 }
   14114 
   14115 /*
   14116  * NVM related.
   14117  * Microwire, SPI (w/wo EERD) and Flash.
   14118  */
   14119 
/* Both SPI and Microwire */
   14121 
   14122 /*
   14123  * wm_eeprom_sendbits:
   14124  *
   14125  *	Send a series of bits to the EEPROM.
   14126  */
   14127 static void
   14128 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   14129 {
   14130 	uint32_t reg;
   14131 	int x;
   14132 
   14133 	reg = CSR_READ(sc, WMREG_EECD);
   14134 
   14135 	for (x = nbits; x > 0; x--) {
   14136 		if (bits & (1U << (x - 1)))
   14137 			reg |= EECD_DI;
   14138 		else
   14139 			reg &= ~EECD_DI;
   14140 		CSR_WRITE(sc, WMREG_EECD, reg);
   14141 		CSR_WRITE_FLUSH(sc);
   14142 		delay(2);
   14143 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14144 		CSR_WRITE_FLUSH(sc);
   14145 		delay(2);
   14146 		CSR_WRITE(sc, WMREG_EECD, reg);
   14147 		CSR_WRITE_FLUSH(sc);
   14148 		delay(2);
   14149 	}
   14150 }
   14151 
   14152 /*
   14153  * wm_eeprom_recvbits:
   14154  *
   14155  *	Receive a series of bits from the EEPROM.
   14156  */
   14157 static void
   14158 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   14159 {
   14160 	uint32_t reg, val;
   14161 	int x;
   14162 
   14163 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14164 
   14165 	val = 0;
   14166 	for (x = nbits; x > 0; x--) {
   14167 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14168 		CSR_WRITE_FLUSH(sc);
   14169 		delay(2);
   14170 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14171 			val |= (1U << (x - 1));
   14172 		CSR_WRITE(sc, WMREG_EECD, reg);
   14173 		CSR_WRITE_FLUSH(sc);
   14174 		delay(2);
   14175 	}
   14176 	*valp = val;
   14177 }
   14178 
   14179 /* Microwire */
   14180 
   14181 /*
   14182  * wm_nvm_read_uwire:
   14183  *
   14184  *	Read a word from the EEPROM using the MicroWire protocol.
   14185  */
   14186 static int
   14187 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14188 {
   14189 	uint32_t reg, val;
   14190 	int i, rv;
   14191 
   14192 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14193 		device_xname(sc->sc_dev), __func__));
   14194 
   14195 	rv = sc->nvm.acquire(sc);
   14196 	if (rv != 0)
   14197 		return rv;
   14198 
   14199 	for (i = 0; i < wordcnt; i++) {
   14200 		/* Clear SK and DI. */
   14201 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14202 		CSR_WRITE(sc, WMREG_EECD, reg);
   14203 
		/*
		 * XXX: Workaround for a bug in qemu-0.12.x and prior
		 * and in Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 emulation acts as an 82540.
		 */
   14211 		if (sc->sc_type == WM_T_82540) {
   14212 			reg |= EECD_SK;
   14213 			CSR_WRITE(sc, WMREG_EECD, reg);
   14214 			reg &= ~EECD_SK;
   14215 			CSR_WRITE(sc, WMREG_EECD, reg);
   14216 			CSR_WRITE_FLUSH(sc);
   14217 			delay(2);
   14218 		}
   14219 		/* XXX: end of workaround */
   14220 
   14221 		/* Set CHIP SELECT. */
   14222 		reg |= EECD_CS;
   14223 		CSR_WRITE(sc, WMREG_EECD, reg);
   14224 		CSR_WRITE_FLUSH(sc);
   14225 		delay(2);
   14226 
   14227 		/* Shift in the READ command. */
   14228 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14229 
   14230 		/* Shift in address. */
   14231 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14232 
   14233 		/* Shift out the data. */
   14234 		wm_eeprom_recvbits(sc, &val, 16);
   14235 		data[i] = val & 0xffff;
   14236 
   14237 		/* Clear CHIP SELECT. */
   14238 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14239 		CSR_WRITE(sc, WMREG_EECD, reg);
   14240 		CSR_WRITE_FLUSH(sc);
   14241 		delay(2);
   14242 	}
   14243 
   14244 	sc->nvm.release(sc);
   14245 	return 0;
   14246 }
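
/*
 * Illustrative transaction layout for wm_nvm_read_uwire() above (a
 * sketch, not additional driver code), assuming a part with 6 address
 * bits.  Reading word 0x10 proceeds as:
 *
 *	set EECD_CS
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	- opcode
 *	wm_eeprom_sendbits(sc, 0x10, 6);		- word address
 *	wm_eeprom_recvbits(sc, &val, 16);		- data word
 *	clear EECD_CS
 *
 * The real address width is whatever sc->sc_nvm_addrbits holds.
 */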
   14247 
   14248 /* SPI */
   14249 
   14250 /*
   14251  * Set SPI and FLASH related information from the EECD register.
   14252  * For 82541 and 82547, the word size is taken from EEPROM.
   14253  */
   14254 static int
   14255 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14256 {
   14257 	int size;
   14258 	uint32_t reg;
   14259 	uint16_t data;
   14260 
   14261 	reg = CSR_READ(sc, WMREG_EECD);
   14262 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14263 
   14264 	/* Read the size of NVM from EECD by default */
   14265 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14266 	switch (sc->sc_type) {
   14267 	case WM_T_82541:
   14268 	case WM_T_82541_2:
   14269 	case WM_T_82547:
   14270 	case WM_T_82547_2:
   14271 		/* Set dummy value to access EEPROM */
   14272 		sc->sc_nvm_wordsize = 64;
		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to read EEPROM size\n", __func__);
			return -1;
		}
		reg = data;
   14278 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14279 		if (size == 0)
			size = 6; /* 64 words */
   14281 		else
   14282 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14283 		break;
   14284 	case WM_T_80003:
   14285 	case WM_T_82571:
   14286 	case WM_T_82572:
   14287 	case WM_T_82573: /* SPI case */
   14288 	case WM_T_82574: /* SPI case */
   14289 	case WM_T_82583: /* SPI case */
   14290 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14291 		if (size > 14)
   14292 			size = 14;
   14293 		break;
   14294 	case WM_T_82575:
   14295 	case WM_T_82576:
   14296 	case WM_T_82580:
   14297 	case WM_T_I350:
   14298 	case WM_T_I354:
   14299 	case WM_T_I210:
   14300 	case WM_T_I211:
   14301 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14302 		if (size > 15)
   14303 			size = 15;
   14304 		break;
   14305 	default:
   14306 		aprint_error_dev(sc->sc_dev,
   14307 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   14308 		return -1;
   14310 	}
   14311 
   14312 	sc->sc_nvm_wordsize = 1 << size;
   14313 
   14314 	return 0;
   14315 }
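
/*
 * Worked example (illustrative): with NVM_WORD_SIZE_BASE_SHIFT being 6
 * (its usual e1000 value), an EECD size field of 0 on an 82541 yields
 * size = 6, i.e. 1 << 6 = 64 words, while a size field of 4 on an
 * 82571 yields size = 4 + 6 = 10, i.e. 1 << 10 = 1024 words.
 */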
   14316 
   14317 /*
   14318  * wm_nvm_ready_spi:
   14319  *
   14320  *	Wait for a SPI EEPROM to be ready for commands.
   14321  */
   14322 static int
   14323 wm_nvm_ready_spi(struct wm_softc *sc)
   14324 {
   14325 	uint32_t val;
   14326 	int usec;
   14327 
   14328 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14329 		device_xname(sc->sc_dev), __func__));
   14330 
   14331 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14332 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14333 		wm_eeprom_recvbits(sc, &val, 8);
   14334 		if ((val & SPI_SR_RDY) == 0)
   14335 			break;
   14336 	}
   14337 	if (usec >= SPI_MAX_RETRIES) {
   14338 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   14339 		return -1;
   14340 	}
   14341 	return 0;
   14342 }
   14343 
   14344 /*
   14345  * wm_nvm_read_spi:
   14346  *
 *	Read a word from the EEPROM using the SPI protocol.
   14348  */
   14349 static int
   14350 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14351 {
   14352 	uint32_t reg, val;
   14353 	int i;
   14354 	uint8_t opc;
   14355 	int rv;
   14356 
   14357 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14358 		device_xname(sc->sc_dev), __func__));
   14359 
   14360 	rv = sc->nvm.acquire(sc);
   14361 	if (rv != 0)
   14362 		return rv;
   14363 
   14364 	/* Clear SK and CS. */
   14365 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14366 	CSR_WRITE(sc, WMREG_EECD, reg);
   14367 	CSR_WRITE_FLUSH(sc);
   14368 	delay(2);
   14369 
   14370 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14371 		goto out;
   14372 
   14373 	/* Toggle CS to flush commands. */
   14374 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14375 	CSR_WRITE_FLUSH(sc);
   14376 	delay(2);
   14377 	CSR_WRITE(sc, WMREG_EECD, reg);
   14378 	CSR_WRITE_FLUSH(sc);
   14379 	delay(2);
   14380 
   14381 	opc = SPI_OPC_READ;
   14382 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14383 		opc |= SPI_OPC_A8;
   14384 
   14385 	wm_eeprom_sendbits(sc, opc, 8);
   14386 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14387 
   14388 	for (i = 0; i < wordcnt; i++) {
   14389 		wm_eeprom_recvbits(sc, &val, 16);
   14390 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14391 	}
   14392 
   14393 	/* Raise CS and clear SK. */
   14394 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14395 	CSR_WRITE(sc, WMREG_EECD, reg);
   14396 	CSR_WRITE_FLUSH(sc);
   14397 	delay(2);
   14398 
   14399 out:
   14400 	sc->nvm.release(sc);
   14401 	return rv;
   14402 }
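
/*
 * Byte order, with a worked example (illustrative): the EEPROM returns
 * the two bytes of each word in storage order, so a word whose value
 * is 0xBBAA arrives in 'val' as 0xAABB and the swap above recovers
 * 0xBBAA.  On 8-bit-address parts, the SPI_OPC_A8 opcode bit supplies
 * the extra (ninth) address bit for words at offset 0x80 and above.
 */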
   14403 
/* Reading via the EERD register */
   14405 
   14406 static int
   14407 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14408 {
   14409 	uint32_t attempts = 100000;
   14410 	uint32_t i, reg = 0;
   14411 	int32_t done = -1;
   14412 
   14413 	for (i = 0; i < attempts; i++) {
   14414 		reg = CSR_READ(sc, rw);
   14415 
   14416 		if (reg & EERD_DONE) {
   14417 			done = 0;
   14418 			break;
   14419 		}
   14420 		delay(5);
   14421 	}
   14422 
   14423 	return done;
   14424 }
   14425 
   14426 static int
   14427 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14428 {
   14429 	int i, eerd = 0;
   14430 	int rv;
   14431 
   14432 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14433 		device_xname(sc->sc_dev), __func__));
   14434 
   14435 	rv = sc->nvm.acquire(sc);
   14436 	if (rv != 0)
   14437 		return rv;
   14438 
   14439 	for (i = 0; i < wordcnt; i++) {
   14440 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14441 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14442 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14443 		if (rv != 0) {
   14444 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   14445 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   14446 			break;
   14447 		}
   14448 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14449 	}
   14450 
   14451 	sc->nvm.release(sc);
   14452 	return rv;
   14453 }
   14454 
   14455 /* Flash */
   14456 
   14457 static int
   14458 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14459 {
   14460 	uint32_t eecd;
   14461 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14462 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14463 	uint32_t nvm_dword = 0;
   14464 	uint8_t sig_byte = 0;
   14465 	int rv;
   14466 
   14467 	switch (sc->sc_type) {
   14468 	case WM_T_PCH_SPT:
   14469 	case WM_T_PCH_CNP:
   14470 	case WM_T_PCH_TGP:
   14471 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14472 		act_offset = ICH_NVM_SIG_WORD * 2;
   14473 
   14474 		/* Set bank to 0 in case flash read fails. */
   14475 		*bank = 0;
   14476 
   14477 		/* Check bank 0 */
   14478 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14479 		if (rv != 0)
   14480 			return rv;
   14481 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14482 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14483 			*bank = 0;
   14484 			return 0;
   14485 		}
   14486 
   14487 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14491 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14492 			*bank = 1;
   14493 			return 0;
   14494 		}
   14495 		aprint_error_dev(sc->sc_dev,
   14496 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14497 		return -1;
   14498 	case WM_T_ICH8:
   14499 	case WM_T_ICH9:
   14500 		eecd = CSR_READ(sc, WMREG_EECD);
   14501 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14502 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14503 			return 0;
   14504 		}
   14505 		/* FALLTHROUGH */
   14506 	default:
   14507 		/* Default to 0 */
   14508 		*bank = 0;
   14509 
   14510 		/* Check bank 0 */
   14511 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14512 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14513 			*bank = 0;
   14514 			return 0;
   14515 		}
   14516 
   14517 		/* Check bank 1 */
   14518 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14519 		    &sig_byte);
   14520 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14521 			*bank = 1;
   14522 			return 0;
   14523 		}
   14524 	}
   14525 
   14526 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14527 		device_xname(sc->sc_dev)));
   14528 	return -1;
   14529 }
   14530 
   14531 /******************************************************************************
   14532  * This function does initial flash setup so that a new read/write/erase cycle
   14533  * can be started.
   14534  *
   14535  * sc - The pointer to the hw structure
   14536  ****************************************************************************/
   14537 static int32_t
   14538 wm_ich8_cycle_init(struct wm_softc *sc)
   14539 {
   14540 	uint16_t hsfsts;
   14541 	int32_t error = 1;
   14542 	int32_t i     = 0;
   14543 
   14544 	if (sc->sc_type >= WM_T_PCH_SPT)
   14545 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14546 	else
   14547 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14548 
	/* Check the Flash Descriptor Valid bit in HW status */
   14550 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14551 		return error;
   14552 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   14555 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14556 
   14557 	if (sc->sc_type >= WM_T_PCH_SPT)
   14558 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14559 	else
   14560 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14561 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then indicate whether a cycle is in
	 * progress or has completed.  We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that accesses from two threads are serialized and two threads
	 * cannot start a cycle at the same time.
	 */
   14572 
   14573 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14574 		/*
   14575 		 * There is no cycle running at present, so we can start a
   14576 		 * cycle
   14577 		 */
   14578 
   14579 		/* Begin by setting Flash Cycle Done. */
   14580 		hsfsts |= HSFSTS_DONE;
   14581 		if (sc->sc_type >= WM_T_PCH_SPT)
   14582 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14583 			    hsfsts & 0xffffUL);
   14584 		else
   14585 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14586 		error = 0;
   14587 	} else {
		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
   14592 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14593 			if (sc->sc_type >= WM_T_PCH_SPT)
   14594 				hsfsts = ICH8_FLASH_READ32(sc,
   14595 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14596 			else
   14597 				hsfsts = ICH8_FLASH_READ16(sc,
   14598 				    ICH_FLASH_HSFSTS);
   14599 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14600 				error = 0;
   14601 				break;
   14602 			}
   14603 			delay(1);
   14604 		}
   14605 		if (error == 0) {
			/*
			 * The previous cycle ended before the timeout;
			 * now set the Flash Cycle Done.
			 */
   14610 			hsfsts |= HSFSTS_DONE;
   14611 			if (sc->sc_type >= WM_T_PCH_SPT)
   14612 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14613 				    hsfsts & 0xffffUL);
   14614 			else
   14615 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14616 				    hsfsts);
   14617 		}
   14618 	}
   14619 	return error;
   14620 }
   14621 
   14622 /******************************************************************************
   14623  * This function starts a flash cycle and waits for its completion
   14624  *
   14625  * sc - The pointer to the hw structure
   14626  ****************************************************************************/
   14627 static int32_t
   14628 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14629 {
   14630 	uint16_t hsflctl;
   14631 	uint16_t hsfsts;
   14632 	int32_t error = 1;
   14633 	uint32_t i = 0;
   14634 
   14635 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14636 	if (sc->sc_type >= WM_T_PCH_SPT)
   14637 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14638 	else
   14639 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14640 	hsflctl |= HSFCTL_GO;
   14641 	if (sc->sc_type >= WM_T_PCH_SPT)
   14642 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14643 		    (uint32_t)hsflctl << 16);
   14644 	else
   14645 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14646 
   14647 	/* Wait till FDONE bit is set to 1 */
   14648 	do {
   14649 		if (sc->sc_type >= WM_T_PCH_SPT)
   14650 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14651 			    & 0xffffUL;
   14652 		else
   14653 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14654 		if (hsfsts & HSFSTS_DONE)
   14655 			break;
   14656 		delay(1);
   14657 		i++;
   14658 	} while (i < timeout);
   14659 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   14660 		error = 0;
   14661 
   14662 	return error;
   14663 }
   14664 
   14665 /******************************************************************************
   14666  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14667  *
   14668  * sc - The pointer to the hw structure
   14669  * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte, 2=word, 4=dword
   14671  * data - Pointer to the word to store the value read.
   14672  *****************************************************************************/
   14673 static int32_t
   14674 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14675     uint32_t size, uint32_t *data)
   14676 {
   14677 	uint16_t hsfsts;
   14678 	uint16_t hsflctl;
   14679 	uint32_t flash_linear_address;
   14680 	uint32_t flash_data = 0;
   14681 	int32_t error = 1;
   14682 	int32_t count = 0;
   14683 
   14684 	if (size < 1  || size > 4 || data == 0x0 ||
   14685 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14686 		return error;
   14687 
   14688 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14689 	    sc->sc_ich8_flash_base;
   14690 
   14691 	do {
   14692 		delay(1);
   14693 		/* Steps */
   14694 		error = wm_ich8_cycle_init(sc);
   14695 		if (error)
   14696 			break;
   14697 
   14698 		if (sc->sc_type >= WM_T_PCH_SPT)
   14699 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14700 			    >> 16;
   14701 		else
   14702 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The BCOUNT field encodes size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   14704 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14705 		    & HSFCTL_BCOUNT_MASK;
   14706 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14707 		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32 bit access is supported.
			 */
   14712 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14713 			    (uint32_t)hsflctl << 16);
   14714 		} else
   14715 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14716 
   14717 		/*
   14718 		 * Write the last 24 bits of index into Flash Linear address
   14719 		 * field in Flash Address
   14720 		 */
   14721 		/* TODO: TBD maybe check the index against the size of flash */
   14722 
   14723 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14724 
   14725 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14726 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the data out of the
		 * Flash Data0 register, least significant byte first.
		 */
   14733 		if (error == 0) {
   14734 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14735 			if (size == 1)
   14736 				*data = (uint8_t)(flash_data & 0x000000FF);
   14737 			else if (size == 2)
   14738 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14739 			else if (size == 4)
   14740 				*data = (uint32_t)flash_data;
   14741 			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try,
			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
   14749 			if (sc->sc_type >= WM_T_PCH_SPT)
   14750 				hsfsts = ICH8_FLASH_READ32(sc,
   14751 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14752 			else
   14753 				hsfsts = ICH8_FLASH_READ16(sc,
   14754 				    ICH_FLASH_HSFSTS);
   14755 
   14756 			if (hsfsts & HSFSTS_ERR) {
   14757 				/* Repeat for some time before giving up. */
   14758 				continue;
   14759 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14760 				break;
   14761 		}
   14762 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14763 
   14764 	return error;
   14765 }
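
/*
 * Illustrative sketch of the cycle wm_read_ich8_data() performs (not
 * additional driver code).  Reading the 16-bit word at flash byte
 * index 0x12 amounts to:
 *
 *	wm_ich8_cycle_init(sc);
 *	program HSFCTL with a byte count of (2 - 1) and ICH_CYCLE_READ
 *	write sc->sc_ich8_flash_base + 0x12 to ICH_FLASH_FADDR
 *	wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
 *	word = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0) & 0xFFFF;
 */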
   14766 
   14767 /******************************************************************************
   14768  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14769  *
 * sc - pointer to the wm_softc structure
   14771  * index - The index of the byte to read.
   14772  * data - Pointer to a byte to store the value read.
   14773  *****************************************************************************/
   14774 static int32_t
   14775 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14776 {
   14777 	int32_t status;
   14778 	uint32_t word = 0;
   14779 
   14780 	status = wm_read_ich8_data(sc, index, 1, &word);
   14781 	if (status == 0)
   14782 		*data = (uint8_t)word;
   14783 	else
   14784 		*data = 0;
   14785 
   14786 	return status;
   14787 }
   14788 
   14789 /******************************************************************************
   14790  * Reads a word from the NVM using the ICH8 flash access registers.
   14791  *
 * sc - pointer to the wm_softc structure
   14793  * index - The starting byte index of the word to read.
   14794  * data - Pointer to a word to store the value read.
   14795  *****************************************************************************/
   14796 static int32_t
   14797 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14798 {
   14799 	int32_t status;
   14800 	uint32_t word = 0;
   14801 
   14802 	status = wm_read_ich8_data(sc, index, 2, &word);
   14803 	if (status == 0)
   14804 		*data = (uint16_t)word;
   14805 	else
   14806 		*data = 0;
   14807 
   14808 	return status;
   14809 }
   14810 
   14811 /******************************************************************************
   14812  * Reads a dword from the NVM using the ICH8 flash access registers.
   14813  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   14817  *****************************************************************************/
   14818 static int32_t
   14819 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14820 {
   14821 	int32_t status;
   14822 
   14823 	status = wm_read_ich8_data(sc, index, 4, data);
   14824 	return status;
   14825 }
   14826 
   14827 /******************************************************************************
   14828  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14829  * register.
   14830  *
   14831  * sc - Struct containing variables accessed by shared code
   14832  * offset - offset of word in the EEPROM to read
   14833  * data - word read from the EEPROM
   14834  * words - number of words to read
   14835  *****************************************************************************/
   14836 static int
   14837 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14838 {
   14839 	int rv;
   14840 	uint32_t flash_bank = 0;
   14841 	uint32_t act_offset = 0;
   14842 	uint32_t bank_offset = 0;
   14843 	uint16_t word = 0;
   14844 	uint16_t i = 0;
   14845 
   14846 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14847 		device_xname(sc->sc_dev), __func__));
   14848 
   14849 	rv = sc->nvm.acquire(sc);
   14850 	if (rv != 0)
   14851 		return rv;
   14852 
   14853 	/*
   14854 	 * We need to know which is the valid flash bank.  In the event
   14855 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14856 	 * managing flash_bank. So it cannot be trusted and needs
   14857 	 * to be updated with each read.
   14858 	 */
   14859 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14860 	if (rv) {
   14861 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14862 			device_xname(sc->sc_dev)));
   14863 		flash_bank = 0;
   14864 	}
   14865 
   14866 	/*
   14867 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14868 	 * size
   14869 	 */
   14870 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14871 
   14872 	for (i = 0; i < words; i++) {
   14873 		/* The NVM part needs a byte offset, hence * 2 */
   14874 		act_offset = bank_offset + ((offset + i) * 2);
   14875 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14876 		if (rv) {
   14877 			aprint_error_dev(sc->sc_dev,
   14878 			    "%s: failed to read NVM\n", __func__);
   14879 			break;
   14880 		}
   14881 		data[i] = word;
   14882 	}
   14883 
   14884 	sc->nvm.release(sc);
   14885 	return rv;
   14886 }
   14887 
   14888 /******************************************************************************
   14889  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14890  * register.
   14891  *
   14892  * sc - Struct containing variables accessed by shared code
   14893  * offset - offset of word in the EEPROM to read
   14894  * data - word read from the EEPROM
   14895  * words - number of words to read
   14896  *****************************************************************************/
   14897 static int
   14898 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14899 {
   14900 	int	 rv;
   14901 	uint32_t flash_bank = 0;
   14902 	uint32_t act_offset = 0;
   14903 	uint32_t bank_offset = 0;
   14904 	uint32_t dword = 0;
   14905 	uint16_t i = 0;
   14906 
   14907 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14908 		device_xname(sc->sc_dev), __func__));
   14909 
   14910 	rv = sc->nvm.acquire(sc);
   14911 	if (rv != 0)
   14912 		return rv;
   14913 
   14914 	/*
   14915 	 * We need to know which is the valid flash bank.  In the event
   14916 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14917 	 * managing flash_bank. So it cannot be trusted and needs
   14918 	 * to be updated with each read.
   14919 	 */
   14920 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14921 	if (rv) {
   14922 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14923 			device_xname(sc->sc_dev)));
   14924 		flash_bank = 0;
   14925 	}
   14926 
   14927 	/*
   14928 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14929 	 * size
   14930 	 */
   14931 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14932 
   14933 	for (i = 0; i < words; i++) {
   14934 		/* The NVM part needs a byte offset, hence * 2 */
   14935 		act_offset = bank_offset + ((offset + i) * 2);
   14936 		/* but we must read dword aligned, so mask ... */
   14937 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14938 		if (rv) {
   14939 			aprint_error_dev(sc->sc_dev,
   14940 			    "%s: failed to read NVM\n", __func__);
   14941 			break;
   14942 		}
   14943 		/* ... and pick out low or high word */
   14944 		if ((act_offset & 0x2) == 0)
   14945 			data[i] = (uint16_t)(dword & 0xFFFF);
   14946 		else
   14947 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14948 	}
   14949 
   14950 	sc->nvm.release(sc);
   14951 	return rv;
   14952 }
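
/*
 * Worked example of the alignment logic above (illustrative): for word
 * offset 3 in bank 0, act_offset is 6, the dword is read from byte
 * address 4 (6 & ~0x3), and because (6 & 0x2) != 0 the wanted word is
 * in the high 16 bits of that dword.
 */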
   14953 
   14954 /* iNVM */
   14955 
   14956 static int
   14957 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14958 {
	int32_t	 rv = -1;
   14960 	uint32_t invm_dword;
   14961 	uint16_t i;
   14962 	uint8_t record_type, word_address;
   14963 
   14964 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14965 		device_xname(sc->sc_dev), __func__));
   14966 
   14967 	for (i = 0; i < INVM_SIZE; i++) {
   14968 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14969 		/* Get record type */
   14970 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14971 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14972 			break;
   14973 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14974 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14975 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14976 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14977 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14978 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14979 			if (word_address == address) {
   14980 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14981 				rv = 0;
   14982 				break;
   14983 			}
   14984 		}
   14985 	}
   14986 
   14987 	return rv;
   14988 }
   14989 
   14990 static int
   14991 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14992 {
   14993 	int i, rv;
   14994 
   14995 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14996 		device_xname(sc->sc_dev), __func__));
   14997 
   14998 	rv = sc->nvm.acquire(sc);
   14999 	if (rv != 0)
   15000 		return rv;
   15001 
   15002 	for (i = 0; i < words; i++) {
   15003 		switch (offset + i) {
   15004 		case NVM_OFF_MACADDR:
   15005 		case NVM_OFF_MACADDR1:
   15006 		case NVM_OFF_MACADDR2:
   15007 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   15008 			if (rv != 0) {
   15009 				data[i] = 0xffff;
   15010 				rv = -1;
   15011 			}
   15012 			break;
   15013 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   15014 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15015 			if (rv != 0) {
   15016 				*data = INVM_DEFAULT_AL;
   15017 				rv = 0;
   15018 			}
   15019 			break;
   15020 		case NVM_OFF_CFG2:
   15021 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15022 			if (rv != 0) {
   15023 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   15024 				rv = 0;
   15025 			}
   15026 			break;
   15027 		case NVM_OFF_CFG4:
   15028 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15029 			if (rv != 0) {
   15030 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   15031 				rv = 0;
   15032 			}
   15033 			break;
   15034 		case NVM_OFF_LED_1_CFG:
   15035 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15036 			if (rv != 0) {
   15037 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   15038 				rv = 0;
   15039 			}
   15040 			break;
   15041 		case NVM_OFF_LED_0_2_CFG:
   15042 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15043 			if (rv != 0) {
   15044 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   15045 				rv = 0;
   15046 			}
   15047 			break;
   15048 		case NVM_OFF_ID_LED_SETTINGS:
   15049 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15050 			if (rv != 0) {
   15051 				*data = ID_LED_RESERVED_FFFF;
   15052 				rv = 0;
   15053 			}
   15054 			break;
   15055 		default:
   15056 			DPRINTF(sc, WM_DEBUG_NVM,
   15057 			    ("NVM word 0x%02x is not mapped.\n", offset));
   15058 			*data = NVM_RESERVED_WORD;
   15059 			break;
   15060 		}
   15061 	}
   15062 
   15063 	sc->nvm.release(sc);
   15064 	return rv;
   15065 }
   15066 
/* Locking, NVM type detection, checksum validation, version and read */
   15068 
   15069 static int
   15070 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   15071 {
   15072 	uint32_t eecd = 0;
   15073 
   15074 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   15075 	    || sc->sc_type == WM_T_82583) {
   15076 		eecd = CSR_READ(sc, WMREG_EECD);
   15077 
   15078 		/* Isolate bits 15 & 16 */
   15079 		eecd = ((eecd >> 15) & 0x03);
   15080 
   15081 		/* If both bits are set, device is Flash type */
   15082 		if (eecd == 0x03)
   15083 			return 0;
   15084 	}
   15085 	return 1;
   15086 }
   15087 
   15088 static int
   15089 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   15090 {
   15091 	uint32_t eec;
   15092 
   15093 	eec = CSR_READ(sc, WMREG_EEC);
   15094 	if ((eec & EEC_FLASH_DETECTED) != 0)
   15095 		return 1;
   15096 
   15097 	return 0;
   15098 }
   15099 
   15100 /*
   15101  * wm_nvm_validate_checksum
   15102  *
   15103  * The checksum is defined as the sum of the first 64 (16 bit) words.
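 *
 * A worked example (illustrative): if the first 63 words sum to
 * 0x1234, the checksum word at the last offset must contain
 * NVM_CHECKSUM - 0x1234 (modulo 2^16) for the image to be consistent.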
   15104  */
   15105 static int
   15106 wm_nvm_validate_checksum(struct wm_softc *sc)
   15107 {
   15108 	uint16_t checksum;
   15109 	uint16_t eeprom_data;
   15110 #ifdef WM_DEBUG
   15111 	uint16_t csum_wordaddr, valid_checksum;
   15112 #endif
   15113 	int i;
   15114 
   15115 	checksum = 0;
   15116 
   15117 	/* Don't check for I211 */
   15118 	if (sc->sc_type == WM_T_I211)
   15119 		return 0;
   15120 
   15121 #ifdef WM_DEBUG
   15122 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
   15123 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
   15124 		csum_wordaddr = NVM_OFF_COMPAT;
   15125 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   15126 	} else {
   15127 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   15128 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   15129 	}
   15130 
   15131 	/* Dump EEPROM image for debug */
   15132 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15133 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15134 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   15135 		/* XXX PCH_SPT? */
   15136 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   15137 		if ((eeprom_data & valid_checksum) == 0)
   15138 			DPRINTF(sc, WM_DEBUG_NVM,
   15139 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   15140 				device_xname(sc->sc_dev), eeprom_data,
   15141 				valid_checksum));
   15142 	}
   15143 
   15144 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   15145 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   15146 		for (i = 0; i < NVM_SIZE; i++) {
   15147 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15148 				printf("XXXX ");
   15149 			else
   15150 				printf("%04hx ", eeprom_data);
   15151 			if (i % 8 == 7)
   15152 				printf("\n");
   15153 		}
   15154 	}
   15155 
   15156 #endif /* WM_DEBUG */
   15157 
   15158 	for (i = 0; i < NVM_SIZE; i++) {
   15159 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15160 			return -1;
   15161 		checksum += eeprom_data;
   15162 	}
   15163 
   15164 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15165 #ifdef WM_DEBUG
   15166 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15167 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15168 #endif
   15169 	}
   15170 
   15171 	return 0;
   15172 }
   15173 
   15174 static void
   15175 wm_nvm_version_invm(struct wm_softc *sc)
   15176 {
   15177 	uint32_t dword;
   15178 
   15179 	/*
	 * Linux's code to decode the version is very strange, so we
	 * don't follow that algorithm and instead use word 61 as the
	 * documentation describes.  Perhaps it's not perfect, though...
   15183 	 *
   15184 	 * Example:
   15185 	 *
   15186 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15187 	 */
   15188 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15189 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15190 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15191 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15192 }
   15193 
   15194 static void
   15195 wm_nvm_version(struct wm_softc *sc)
   15196 {
   15197 	uint16_t major, minor, build, patch;
   15198 	uint16_t uid0, uid1;
   15199 	uint16_t nvm_data;
   15200 	uint16_t off;
   15201 	bool check_version = false;
   15202 	bool check_optionrom = false;
   15203 	bool have_build = false;
   15204 	bool have_uid = true;
   15205 
   15206 	/*
   15207 	 * Version format:
   15208 	 *
   15209 	 * XYYZ
   15210 	 * X0YZ
   15211 	 * X0YY
   15212 	 *
   15213 	 * Example:
   15214 	 *
   15215 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15216 	 *	82571	0x50a6	5.10.6?
   15217 	 *	82572	0x506a	5.6.10?
   15218 	 *	82572EI	0x5069	5.6.9?
   15219 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15220 	 *		0x2013	2.1.3?
   15221 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15222 	 * ICH8+82567	0x0040	0.4.0?
   15223 	 * ICH9+82566	0x1040	1.4.0?
   15224 	 *ICH10+82567	0x0043	0.4.3?
   15225 	 *  PCH+82577	0x00c1	0.12.1?
   15226 	 * PCH2+82579	0x00d3	0.13.3?
   15227 	 *		0x00d4	0.13.4?
   15228 	 *  LPT+I218	0x0023	0.2.3?
   15229 	 *  SPT+I219	0x0084	0.8.4?
   15230 	 *  CNP+I219	0x0054	0.5.4?
   15231 	 */
   15232 
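	/*
	 * An illustrative decode of the first example above (assuming
	 * the usual mask layout): 0x50a2 splits into major nibble 0x5,
	 * minor field 0x0a and build nibble 0x2; the BCD adjustment
	 * below turns minor 0x0a into decimal 10, giving version 5.10.2.
	 */
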
   15233 	/*
   15234 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64
	 * words.  I've never seen real 82574 hardware with such a small
	 * SPI ROM.
   15237 	 */
   15238 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15239 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15240 		have_uid = false;
   15241 
   15242 	switch (sc->sc_type) {
   15243 	case WM_T_82571:
   15244 	case WM_T_82572:
   15245 	case WM_T_82574:
   15246 	case WM_T_82583:
   15247 		check_version = true;
   15248 		check_optionrom = true;
   15249 		have_build = true;
   15250 		break;
   15251 	case WM_T_ICH8:
   15252 	case WM_T_ICH9:
   15253 	case WM_T_ICH10:
   15254 	case WM_T_PCH:
   15255 	case WM_T_PCH2:
   15256 	case WM_T_PCH_LPT:
   15257 	case WM_T_PCH_SPT:
   15258 	case WM_T_PCH_CNP:
   15259 	case WM_T_PCH_TGP:
   15260 		check_version = true;
   15261 		have_build = true;
   15262 		have_uid = false;
   15263 		break;
   15264 	case WM_T_82575:
   15265 	case WM_T_82576:
   15266 	case WM_T_82580:
   15267 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15268 			check_version = true;
   15269 		break;
   15270 	case WM_T_I211:
   15271 		wm_nvm_version_invm(sc);
   15272 		have_uid = false;
   15273 		goto printver;
   15274 	case WM_T_I210:
   15275 		if (!wm_nvm_flash_presence_i210(sc)) {
   15276 			wm_nvm_version_invm(sc);
   15277 			have_uid = false;
   15278 			goto printver;
   15279 		}
   15280 		/* FALLTHROUGH */
   15281 	case WM_T_I350:
   15282 	case WM_T_I354:
   15283 		check_version = true;
   15284 		check_optionrom = true;
   15285 		break;
   15286 	default:
   15287 		return;
   15288 	}
   15289 	if (check_version
   15290 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15291 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15292 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15293 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15294 			build = nvm_data & NVM_BUILD_MASK;
   15295 			have_build = true;
   15296 		} else
   15297 			minor = nvm_data & 0x00ff;
   15298 
		/* The minor field is BCD; convert it to decimal */
   15300 		minor = (minor / 16) * 10 + (minor % 16);
   15301 		sc->sc_nvm_ver_major = major;
   15302 		sc->sc_nvm_ver_minor = minor;
   15303 
   15304 printver:
   15305 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15306 		    sc->sc_nvm_ver_minor);
   15307 		if (have_build) {
   15308 			sc->sc_nvm_ver_build = build;
   15309 			aprint_verbose(".%d", build);
   15310 		}
   15311 	}
   15312 
	/* Assume the option ROM area is above NVM_SIZE */
   15314 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15315 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15316 		/* Option ROM Version */
   15317 		if ((off != 0x0000) && (off != 0xffff)) {
   15318 			int rv;
   15319 
   15320 			off += NVM_COMBO_VER_OFF;
   15321 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15322 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15323 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15324 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   15325 				/* 16bits */
   15326 				major = uid0 >> 8;
   15327 				build = (uid0 << 8) | (uid1 >> 8);
   15328 				patch = uid1 & 0x00ff;
   15329 				aprint_verbose(", option ROM Version %d.%d.%d",
   15330 				    major, build, patch);
   15331 			}
   15332 		}
   15333 	}
   15334 
   15335 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15336 		aprint_verbose(", Image Unique ID %08x",
   15337 		    ((uint32_t)uid1 << 16) | uid0);
   15338 }
   15339 
   15340 /*
   15341  * wm_nvm_read:
   15342  *
   15343  *	Read data from the serial EEPROM.
   15344  */
   15345 static int
   15346 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15347 {
   15348 	int rv;
   15349 
   15350 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15351 		device_xname(sc->sc_dev), __func__));
   15352 
   15353 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15354 		return -1;
   15355 
   15356 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15357 
   15358 	return rv;
   15359 }
   15360 
   15361 /*
   15362  * Hardware semaphores.
 * Very complex...
   15364  */
   15365 
   15366 static int
   15367 wm_get_null(struct wm_softc *sc)
   15368 {
   15369 
   15370 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15371 		device_xname(sc->sc_dev), __func__));
   15372 	return 0;
   15373 }
   15374 
   15375 static void
   15376 wm_put_null(struct wm_softc *sc)
   15377 {
   15378 
   15379 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15380 		device_xname(sc->sc_dev), __func__));
   15381 	return;
   15382 }
   15383 
   15384 static int
   15385 wm_get_eecd(struct wm_softc *sc)
   15386 {
   15387 	uint32_t reg;
   15388 	int x;
   15389 
   15390 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15391 		device_xname(sc->sc_dev), __func__));
   15392 
   15393 	reg = CSR_READ(sc, WMREG_EECD);
   15394 
   15395 	/* Request EEPROM access. */
   15396 	reg |= EECD_EE_REQ;
   15397 	CSR_WRITE(sc, WMREG_EECD, reg);
   15398 
	/* ... and wait for it to be granted. */
   15400 	for (x = 0; x < 1000; x++) {
   15401 		reg = CSR_READ(sc, WMREG_EECD);
   15402 		if (reg & EECD_EE_GNT)
   15403 			break;
   15404 		delay(5);
   15405 	}
   15406 	if ((reg & EECD_EE_GNT) == 0) {
   15407 		aprint_error_dev(sc->sc_dev,
   15408 		    "could not acquire EEPROM GNT\n");
   15409 		reg &= ~EECD_EE_REQ;
   15410 		CSR_WRITE(sc, WMREG_EECD, reg);
   15411 		return -1;
   15412 	}
   15413 
   15414 	return 0;
   15415 }
   15416 
   15417 static void
   15418 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15419 {
   15420 
   15421 	*eecd |= EECD_SK;
   15422 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15423 	CSR_WRITE_FLUSH(sc);
   15424 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15425 		delay(1);
   15426 	else
   15427 		delay(50);
   15428 }
   15429 
   15430 static void
   15431 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15432 {
   15433 
   15434 	*eecd &= ~EECD_SK;
   15435 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15436 	CSR_WRITE_FLUSH(sc);
   15437 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15438 		delay(1);
   15439 	else
   15440 		delay(50);
   15441 }
   15442 
   15443 static void
   15444 wm_put_eecd(struct wm_softc *sc)
   15445 {
   15446 	uint32_t reg;
   15447 
   15448 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15449 		device_xname(sc->sc_dev), __func__));
   15450 
   15451 	/* Stop nvm */
   15452 	reg = CSR_READ(sc, WMREG_EECD);
   15453 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15454 		/* Pull CS high */
   15455 		reg |= EECD_CS;
   15456 		wm_nvm_eec_clock_lower(sc, &reg);
   15457 	} else {
   15458 		/* CS on Microwire is active-high */
   15459 		reg &= ~(EECD_CS | EECD_DI);
   15460 		CSR_WRITE(sc, WMREG_EECD, reg);
   15461 		wm_nvm_eec_clock_raise(sc, &reg);
   15462 		wm_nvm_eec_clock_lower(sc, &reg);
   15463 	}
   15464 
   15465 	reg = CSR_READ(sc, WMREG_EECD);
   15466 	reg &= ~EECD_EE_REQ;
   15467 	CSR_WRITE(sc, WMREG_EECD, reg);
   15468 
   15469 	return;
   15470 }
   15471 
   15472 /*
   15473  * Get hardware semaphore.
   15474  * Same as e1000_get_hw_semaphore_generic()
   15475  */
   15476 static int
   15477 wm_get_swsm_semaphore(struct wm_softc *sc)
   15478 {
   15479 	int32_t timeout;
   15480 	uint32_t swsm;
   15481 
   15482 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15483 		device_xname(sc->sc_dev), __func__));
   15484 	KASSERT(sc->sc_nvm_wordsize > 0);
   15485 
   15486 retry:
   15487 	/* Get the SW semaphore. */
   15488 	timeout = sc->sc_nvm_wordsize + 1;
   15489 	while (timeout) {
   15490 		swsm = CSR_READ(sc, WMREG_SWSM);
   15491 
   15492 		if ((swsm & SWSM_SMBI) == 0)
   15493 			break;
   15494 
   15495 		delay(50);
   15496 		timeout--;
   15497 	}
   15498 
   15499 	if (timeout == 0) {
   15500 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15501 			/*
   15502 			 * In rare circumstances, the SW semaphore may already
   15503 			 * be held unintentionally. Clear the semaphore once
   15504 			 * before giving up.
   15505 			 */
   15506 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15507 			wm_put_swsm_semaphore(sc);
   15508 			goto retry;
   15509 		}
   15510 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15511 		return -1;
   15512 	}
   15513 
   15514 	/* Get the FW semaphore. */
   15515 	timeout = sc->sc_nvm_wordsize + 1;
   15516 	while (timeout) {
   15517 		swsm = CSR_READ(sc, WMREG_SWSM);
   15518 		swsm |= SWSM_SWESMBI;
   15519 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15520 		/* If we managed to set the bit we got the semaphore. */
   15521 		swsm = CSR_READ(sc, WMREG_SWSM);
   15522 		if (swsm & SWSM_SWESMBI)
   15523 			break;
   15524 
   15525 		delay(50);
   15526 		timeout--;
   15527 	}
   15528 
   15529 	if (timeout == 0) {
   15530 		aprint_error_dev(sc->sc_dev,
   15531 		    "could not acquire SWSM SWESMBI\n");
   15532 		/* Release semaphores */
   15533 		wm_put_swsm_semaphore(sc);
   15534 		return -1;
   15535 	}
   15536 	return 0;
   15537 }
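
/*
 * Note (descriptive, no functional change): the acquisition in
 * wm_get_swsm_semaphore() above is two-phased.  First wait for the
 * firmware-owned SMBI bit to read as clear, then claim SWESMBI by
 * writing it and reading it back; only if the write sticks does the
 * driver own the semaphore.
 */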
   15538 
   15539 /*
   15540  * Put hardware semaphore.
   15541  * Same as e1000_put_hw_semaphore_generic()
   15542  */
   15543 static void
   15544 wm_put_swsm_semaphore(struct wm_softc *sc)
   15545 {
   15546 	uint32_t swsm;
   15547 
   15548 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15549 		device_xname(sc->sc_dev), __func__));
   15550 
   15551 	swsm = CSR_READ(sc, WMREG_SWSM);
   15552 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15553 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15554 }
   15555 
   15556 /*
   15557  * Get SW/FW semaphore.
   15558  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15559  */
   15560 static int
   15561 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15562 {
   15563 	uint32_t swfw_sync;
   15564 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15565 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15566 	int timeout;
   15567 
   15568 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15569 		device_xname(sc->sc_dev), __func__));
   15570 
   15571 	if (sc->sc_type == WM_T_80003)
   15572 		timeout = 50;
   15573 	else
   15574 		timeout = 200;
   15575 
   15576 	while (timeout) {
   15577 		if (wm_get_swsm_semaphore(sc)) {
   15578 			aprint_error_dev(sc->sc_dev,
   15579 			    "%s: failed to get semaphore\n",
   15580 			    __func__);
   15581 			return -1;
   15582 		}
   15583 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15584 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15585 			swfw_sync |= swmask;
   15586 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15587 			wm_put_swsm_semaphore(sc);
   15588 			return 0;
   15589 		}
   15590 		wm_put_swsm_semaphore(sc);
   15591 		delay(5000);
   15592 		timeout--;
   15593 	}
   15594 	device_printf(sc->sc_dev,
   15595 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15596 	    mask, swfw_sync);
   15597 	return -1;
   15598 }
   15599 
   15600 static void
   15601 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15602 {
   15603 	uint32_t swfw_sync;
   15604 
   15605 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15606 		device_xname(sc->sc_dev), __func__));
   15607 
   15608 	while (wm_get_swsm_semaphore(sc) != 0)
   15609 		continue;
   15610 
   15611 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15612 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15613 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15614 
   15615 	wm_put_swsm_semaphore(sc);
   15616 }
   15617 
   15618 static int
   15619 wm_get_nvm_80003(struct wm_softc *sc)
   15620 {
   15621 	int rv;
   15622 
   15623 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15624 		device_xname(sc->sc_dev), __func__));
   15625 
   15626 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15627 		aprint_error_dev(sc->sc_dev,
   15628 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15629 		return rv;
   15630 	}
   15631 
   15632 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15633 	    && (rv = wm_get_eecd(sc)) != 0) {
   15634 		aprint_error_dev(sc->sc_dev,
   15635 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15636 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15637 		return rv;
   15638 	}
   15639 
   15640 	return 0;
   15641 }
   15642 
   15643 static void
   15644 wm_put_nvm_80003(struct wm_softc *sc)
   15645 {
   15646 
   15647 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15648 		device_xname(sc->sc_dev), __func__));
   15649 
   15650 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15651 		wm_put_eecd(sc);
   15652 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15653 }
   15654 
   15655 static int
   15656 wm_get_nvm_82571(struct wm_softc *sc)
   15657 {
   15658 	int rv;
   15659 
   15660 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15661 		device_xname(sc->sc_dev), __func__));
   15662 
   15663 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15664 		return rv;
   15665 
   15666 	switch (sc->sc_type) {
   15667 	case WM_T_82573:
   15668 		break;
   15669 	default:
   15670 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15671 			rv = wm_get_eecd(sc);
   15672 		break;
   15673 	}
   15674 
   15675 	if (rv != 0) {
   15676 		aprint_error_dev(sc->sc_dev,
   15677 		    "%s: failed to get semaphore\n",
   15678 		    __func__);
   15679 		wm_put_swsm_semaphore(sc);
   15680 	}
   15681 
   15682 	return rv;
   15683 }
   15684 
   15685 static void
   15686 wm_put_nvm_82571(struct wm_softc *sc)
   15687 {
   15688 
   15689 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15690 		device_xname(sc->sc_dev), __func__));
   15691 
   15692 	switch (sc->sc_type) {
   15693 	case WM_T_82573:
   15694 		break;
   15695 	default:
   15696 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15697 			wm_put_eecd(sc);
   15698 		break;
   15699 	}
   15700 
   15701 	wm_put_swsm_semaphore(sc);
   15702 }
   15703 
   15704 static int
   15705 wm_get_phy_82575(struct wm_softc *sc)
   15706 {
   15707 
   15708 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15709 		device_xname(sc->sc_dev), __func__));
   15710 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15711 }
   15712 
   15713 static void
   15714 wm_put_phy_82575(struct wm_softc *sc)
   15715 {
   15716 
   15717 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15718 		device_xname(sc->sc_dev), __func__));
   15719 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15720 }
   15721 
   15722 static int
   15723 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15724 {
   15725 	uint32_t ext_ctrl;
   15726 	int timeout = 200;
   15727 
   15728 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15729 		device_xname(sc->sc_dev), __func__));
   15730 
   15731 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15732 	for (timeout = 0; timeout < 200; timeout++) {
   15733 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15734 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15735 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15736 
   15737 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15738 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15739 			return 0;
   15740 		delay(5000);
   15741 	}
   15742 	device_printf(sc->sc_dev,
   15743 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15744 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15745 	return -1;
   15746 }
   15747 
   15748 static void
   15749 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15750 {
   15751 	uint32_t ext_ctrl;
   15752 
   15753 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15754 		device_xname(sc->sc_dev), __func__));
   15755 
   15756 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15757 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15758 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15759 
   15760 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15761 }
   15762 
   15763 static int
   15764 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15765 {
   15766 	uint32_t ext_ctrl;
   15767 	int timeout;
   15768 
   15769 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15770 		device_xname(sc->sc_dev), __func__));
   15771 	mutex_enter(sc->sc_ich_phymtx);
   15772 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15773 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15774 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15775 			break;
   15776 		delay(1000);
   15777 	}
   15778 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15779 		device_printf(sc->sc_dev,
   15780 		    "SW has already locked the resource\n");
   15781 		goto out;
   15782 	}
   15783 
   15784 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15785 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15786 	for (timeout = 0; timeout < 1000; timeout++) {
   15787 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15788 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15789 			break;
   15790 		delay(1000);
   15791 	}
   15792 	if (timeout >= 1000) {
   15793 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15794 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15795 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15796 		goto out;
   15797 	}
   15798 	return 0;
   15799 
   15800 out:
   15801 	mutex_exit(sc->sc_ich_phymtx);
   15802 	return -1;
   15803 }
   15804 
   15805 static void
   15806 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15807 {
   15808 	uint32_t ext_ctrl;
   15809 
   15810 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15811 		device_xname(sc->sc_dev), __func__));
   15812 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15813 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15814 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15815 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15816 	} else
   15817 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15818 
   15819 	mutex_exit(sc->sc_ich_phymtx);
   15820 }
   15821 
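/*
 * On ICH8 and newer, NVM accesses are serialized with a plain software
 * mutex; there is no hardware semaphore to take here.
 */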
   15822 static int
   15823 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15824 {
   15825 
   15826 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15827 		device_xname(sc->sc_dev), __func__));
   15828 	mutex_enter(sc->sc_ich_nvmmtx);
   15829 
   15830 	return 0;
   15831 }
   15832 
   15833 static void
   15834 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15835 {
   15836 
   15837 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15838 		device_xname(sc->sc_dev), __func__));
   15839 	mutex_exit(sc->sc_ich_nvmmtx);
   15840 }
   15841 
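/*
 * Get the 82573 hardware semaphore by setting the MDIO software
 * ownership bit in EXTCNFCTR and polling until the hardware accepts it.
 */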
   15842 static int
   15843 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15844 {
   15845 	int i = 0;
   15846 	uint32_t reg;
   15847 
   15848 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15849 		device_xname(sc->sc_dev), __func__));
   15850 
   15851 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15852 	do {
   15853 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15854 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15855 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15856 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15857 			break;
   15858 		delay(2*1000);
   15859 		i++;
   15860 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15861 
   15862 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15863 		wm_put_hw_semaphore_82573(sc);
   15864 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15865 		    device_xname(sc->sc_dev));
   15866 		return -1;
   15867 	}
   15868 
   15869 	return 0;
   15870 }
   15871 
   15872 static void
   15873 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15874 {
   15875 	uint32_t reg;
   15876 
   15877 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15878 		device_xname(sc->sc_dev), __func__));
   15879 
   15880 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15881 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15882 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15883 }
   15884 
   15885 /*
   15886  * Management mode and power management related subroutines.
   15887  * BMC, AMT, suspend/resume and EEE.
   15888  */
   15889 
   15890 #ifdef WM_WOL
   15891 static int
   15892 wm_check_mng_mode(struct wm_softc *sc)
   15893 {
   15894 	int rv;
   15895 
   15896 	switch (sc->sc_type) {
   15897 	case WM_T_ICH8:
   15898 	case WM_T_ICH9:
   15899 	case WM_T_ICH10:
   15900 	case WM_T_PCH:
   15901 	case WM_T_PCH2:
   15902 	case WM_T_PCH_LPT:
   15903 	case WM_T_PCH_SPT:
   15904 	case WM_T_PCH_CNP:
   15905 	case WM_T_PCH_TGP:
   15906 		rv = wm_check_mng_mode_ich8lan(sc);
   15907 		break;
   15908 	case WM_T_82574:
   15909 	case WM_T_82583:
   15910 		rv = wm_check_mng_mode_82574(sc);
   15911 		break;
   15912 	case WM_T_82571:
   15913 	case WM_T_82572:
   15914 	case WM_T_82573:
   15915 	case WM_T_80003:
   15916 		rv = wm_check_mng_mode_generic(sc);
   15917 		break;
   15918 	default:
		/* Nothing to do */
   15920 		rv = 0;
   15921 		break;
   15922 	}
   15923 
   15924 	return rv;
   15925 }
   15926 
   15927 static int
   15928 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15929 {
   15930 	uint32_t fwsm;
   15931 
   15932 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15933 
   15934 	if (((fwsm & FWSM_FW_VALID) != 0)
   15935 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15936 		return 1;
   15937 
   15938 	return 0;
   15939 }
   15940 
   15941 static int
   15942 wm_check_mng_mode_82574(struct wm_softc *sc)
   15943 {
   15944 	uint16_t data;
   15945 
   15946 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15947 
   15948 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15949 		return 1;
   15950 
   15951 	return 0;
   15952 }
   15953 
   15954 static int
   15955 wm_check_mng_mode_generic(struct wm_softc *sc)
   15956 {
   15957 	uint32_t fwsm;
   15958 
   15959 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15960 
   15961 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15962 		return 1;
   15963 
   15964 	return 0;
   15965 }
   15966 #endif /* WM_WOL */
   15967 
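/*
 * Check whether management pass-through should be enabled: firmware must
 * be present, MANC must have the "receive TCO packets" bit set, and the
 * part must be in the right management mode.  Returns 1 if pass-through
 * should be enabled, 0 otherwise.
 */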
   15968 static int
   15969 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15970 {
   15971 	uint32_t manc, fwsm, factps;
   15972 
   15973 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15974 		return 0;
   15975 
   15976 	manc = CSR_READ(sc, WMREG_MANC);
   15977 
   15978 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15979 		device_xname(sc->sc_dev), manc));
   15980 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15981 		return 0;
   15982 
   15983 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15984 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15985 		factps = CSR_READ(sc, WMREG_FACTPS);
   15986 		if (((factps & FACTPS_MNGCG) == 0)
   15987 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15988 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   15990 		uint16_t data;
   15991 
   15992 		factps = CSR_READ(sc, WMREG_FACTPS);
   15993 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15994 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15995 			device_xname(sc->sc_dev), factps, data));
   15996 		if (((factps & FACTPS_MNGCG) == 0)
   15997 		    && ((data & NVM_CFG2_MNGM_MASK)
   15998 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15999 			return 1;
   16000 	} else if (((manc & MANC_SMBUS_EN) != 0)
   16001 	    && ((manc & MANC_ASF_EN) == 0))
   16002 		return 1;
   16003 
   16004 	return 0;
   16005 }
   16006 
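/*
 * Check whether a PHY reset is currently blocked: by the firmware (FWSM)
 * on ICH/PCH parts, or by the "block PHY reset on IDE" MANC bit on
 * 8257x parts.
 */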
   16007 static bool
   16008 wm_phy_resetisblocked(struct wm_softc *sc)
   16009 {
   16010 	bool blocked = false;
   16011 	uint32_t reg;
   16012 	int i = 0;
   16013 
   16014 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16015 		device_xname(sc->sc_dev), __func__));
   16016 
   16017 	switch (sc->sc_type) {
   16018 	case WM_T_ICH8:
   16019 	case WM_T_ICH9:
   16020 	case WM_T_ICH10:
   16021 	case WM_T_PCH:
   16022 	case WM_T_PCH2:
   16023 	case WM_T_PCH_LPT:
   16024 	case WM_T_PCH_SPT:
   16025 	case WM_T_PCH_CNP:
   16026 	case WM_T_PCH_TGP:
   16027 		do {
   16028 			reg = CSR_READ(sc, WMREG_FWSM);
   16029 			if ((reg & FWSM_RSPCIPHY) == 0) {
   16030 				blocked = true;
   16031 				delay(10*1000);
   16032 				continue;
   16033 			}
   16034 			blocked = false;
   16035 		} while (blocked && (i++ < 30));
		return blocked;
   16038 	case WM_T_82571:
   16039 	case WM_T_82572:
   16040 	case WM_T_82573:
   16041 	case WM_T_82574:
   16042 	case WM_T_82583:
   16043 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   16050 	default:
   16051 		/* No problem */
   16052 		break;
   16053 	}
   16054 
   16055 	return false;
   16056 }
   16057 
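/*
 * Let the firmware know that the driver has loaded, by setting the
 * DRV_LOAD bit (in SWSM on the 82573, in CTRL_EXT on other >= 82571
 * parts).  wm_release_hw_control() clears it again.
 */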
   16058 static void
   16059 wm_get_hw_control(struct wm_softc *sc)
   16060 {
   16061 	uint32_t reg;
   16062 
   16063 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16064 		device_xname(sc->sc_dev), __func__));
   16065 
   16066 	if (sc->sc_type == WM_T_82573) {
   16067 		reg = CSR_READ(sc, WMREG_SWSM);
   16068 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   16069 	} else if (sc->sc_type >= WM_T_82571) {
   16070 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16071 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   16072 	}
   16073 }
   16074 
   16075 static void
   16076 wm_release_hw_control(struct wm_softc *sc)
   16077 {
   16078 	uint32_t reg;
   16079 
   16080 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16081 		device_xname(sc->sc_dev), __func__));
   16082 
   16083 	if (sc->sc_type == WM_T_82573) {
   16084 		reg = CSR_READ(sc, WMREG_SWSM);
   16085 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   16086 	} else if (sc->sc_type >= WM_T_82571) {
   16087 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16088 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   16089 	}
   16090 }
   16091 
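/*
 * Gate (or ungate) automatic PHY configuration by hardware.  Only
 * meaningful on PCH2 (82579) and newer; used to keep the hardware from
 * reconfiguring the PHY while the driver is working on it.
 */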
   16092 static void
   16093 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   16094 {
   16095 	uint32_t reg;
   16096 
   16097 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16098 		device_xname(sc->sc_dev), __func__));
   16099 
   16100 	if (sc->sc_type < WM_T_PCH2)
   16101 		return;
   16102 
   16103 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   16104 
   16105 	if (gate)
   16106 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   16107 	else
   16108 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   16109 
   16110 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   16111 }
   16112 
   16113 static int
   16114 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   16115 {
   16116 	uint32_t fwsm, reg;
   16117 	int rv;
   16118 
   16119 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16120 		device_xname(sc->sc_dev), __func__));
   16121 
   16122 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   16123 	wm_gate_hw_phy_config_ich8lan(sc, true);
   16124 
   16125 	/* Disable ULP */
   16126 	wm_ulp_disable(sc);
   16127 
   16128 	/* Acquire PHY semaphore */
   16129 	rv = sc->phy.acquire(sc);
   16130 	if (rv != 0) {
   16131 		DPRINTF(sc, WM_DEBUG_INIT,
   16132 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16133 		return rv;
   16134 	}
   16135 
   16136 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   16137 	 * inaccessible and resetting the PHY is not blocked, toggle the
   16138 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   16139 	 */
   16140 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16141 	switch (sc->sc_type) {
   16142 	case WM_T_PCH_LPT:
   16143 	case WM_T_PCH_SPT:
   16144 	case WM_T_PCH_CNP:
   16145 	case WM_T_PCH_TGP:
   16146 		if (wm_phy_is_accessible_pchlan(sc))
   16147 			break;
   16148 
   16149 		/* Before toggling LANPHYPC, see if PHY is accessible by
   16150 		 * forcing MAC to SMBus mode first.
   16151 		 */
   16152 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16153 		reg |= CTRL_EXT_FORCE_SMBUS;
   16154 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16155 #if 0
   16156 		/* XXX Isn't this required??? */
   16157 		CSR_WRITE_FLUSH(sc);
   16158 #endif
   16159 		/* Wait 50 milliseconds for MAC to finish any retries
   16160 		 * that it might be trying to perform from previous
   16161 		 * attempts to acknowledge any phy read requests.
   16162 		 */
   16163 		delay(50 * 1000);
   16164 		/* FALLTHROUGH */
   16165 	case WM_T_PCH2:
   16166 		if (wm_phy_is_accessible_pchlan(sc) == true)
   16167 			break;
   16168 		/* FALLTHROUGH */
   16169 	case WM_T_PCH:
		if ((sc->sc_type == WM_T_PCH)
		    && ((fwsm & FWSM_FW_VALID) != 0))
			break;
   16173 
   16174 		if (wm_phy_resetisblocked(sc) == true) {
   16175 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16176 			break;
   16177 		}
   16178 
   16179 		/* Toggle LANPHYPC Value bit */
   16180 		wm_toggle_lanphypc_pch_lpt(sc);
   16181 
   16182 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16183 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16184 				break;
   16185 
   16186 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   16187 			 * so ensure that the MAC is also out of SMBus mode
   16188 			 */
   16189 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16190 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16191 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16192 
   16193 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16194 				break;
   16195 			rv = -1;
   16196 		}
   16197 		break;
   16198 	default:
   16199 		break;
   16200 	}
   16201 
   16202 	/* Release semaphore */
   16203 	sc->phy.release(sc);
   16204 
   16205 	if (rv == 0) {
   16206 		/* Check to see if able to reset PHY.  Print error if not */
   16207 		if (wm_phy_resetisblocked(sc)) {
   16208 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16209 			goto out;
   16210 		}
   16211 
		/* Reset the PHY before any access to it.  Doing so ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		if ((rv = wm_reset_phy(sc)) != 0)
			goto out;
   16219 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, we
		 * just print an error (the reference driver returns
		 * E1000E_BLK_PHY_RESET here, as this is the condition the
		 * PHY is in).
		 */
   16226 		if (wm_phy_resetisblocked(sc))
   16227 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16228 	}
   16229 
   16230 out:
   16231 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16232 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16233 		delay(10*1000);
   16234 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16235 	}
   16236 
	return rv;
   16238 }
   16239 
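/*
 * Program MANC/MANC2H so that management packets (ports 623 and 624) are
 * passed up to the host while the driver runs, and disable hardware
 * interception of ARP by the firmware.
 */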
   16240 static void
   16241 wm_init_manageability(struct wm_softc *sc)
   16242 {
   16243 
   16244 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16245 		device_xname(sc->sc_dev), __func__));
   16246 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16247 
   16248 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16249 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16250 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16251 
   16252 		/* Disable hardware interception of ARP */
   16253 		manc &= ~MANC_ARP_EN;
   16254 
   16255 		/* Enable receiving management packets to the host */
   16256 		if (sc->sc_type >= WM_T_82571) {
   16257 			manc |= MANC_EN_MNG2HOST;
   16258 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16259 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16260 		}
   16261 
   16262 		CSR_WRITE(sc, WMREG_MANC, manc);
   16263 	}
   16264 }
   16265 
   16266 static void
   16267 wm_release_manageability(struct wm_softc *sc)
   16268 {
   16269 
   16270 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16271 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16272 
   16273 		manc |= MANC_ARP_EN;
   16274 		if (sc->sc_type >= WM_T_82571)
   16275 			manc &= ~MANC_EN_MNG2HOST;
   16276 
   16277 		CSR_WRITE(sc, WMREG_MANC, manc);
   16278 	}
   16279 }
   16280 
   16281 static void
   16282 wm_get_wakeup(struct wm_softc *sc)
   16283 {
   16284 
   16285 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16286 	switch (sc->sc_type) {
   16287 	case WM_T_82573:
   16288 	case WM_T_82583:
   16289 		sc->sc_flags |= WM_F_HAS_AMT;
   16290 		/* FALLTHROUGH */
   16291 	case WM_T_80003:
   16292 	case WM_T_82575:
   16293 	case WM_T_82576:
   16294 	case WM_T_82580:
   16295 	case WM_T_I350:
   16296 	case WM_T_I354:
   16297 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16298 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16299 		/* FALLTHROUGH */
   16300 	case WM_T_82541:
   16301 	case WM_T_82541_2:
   16302 	case WM_T_82547:
   16303 	case WM_T_82547_2:
   16304 	case WM_T_82571:
   16305 	case WM_T_82572:
   16306 	case WM_T_82574:
   16307 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16308 		break;
   16309 	case WM_T_ICH8:
   16310 	case WM_T_ICH9:
   16311 	case WM_T_ICH10:
   16312 	case WM_T_PCH:
   16313 	case WM_T_PCH2:
   16314 	case WM_T_PCH_LPT:
   16315 	case WM_T_PCH_SPT:
   16316 	case WM_T_PCH_CNP:
   16317 	case WM_T_PCH_TGP:
   16318 		sc->sc_flags |= WM_F_HAS_AMT;
   16319 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16320 		break;
   16321 	default:
   16322 		break;
   16323 	}
   16324 
   16325 	/* 1: HAS_MANAGE */
   16326 	if (wm_enable_mng_pass_thru(sc) != 0)
   16327 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16328 
	/*
	 * Note that the WOL flag is set after the resetting of the EEPROM
	 * stuff.
	 */
   16333 }
   16334 
/*
 * Unconfigure Ultra Low Power (ULP) mode.
 * Only for PCH_LPT and newer, excluding some I217/I218 devices (see below).
 */
   16339 static int
   16340 wm_ulp_disable(struct wm_softc *sc)
   16341 {
   16342 	uint32_t reg;
   16343 	uint16_t phyreg;
   16344 	int i = 0, rv;
   16345 
   16346 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16347 		device_xname(sc->sc_dev), __func__));
   16348 	/* Exclude old devices */
   16349 	if ((sc->sc_type < WM_T_PCH_LPT)
   16350 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16351 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16352 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16353 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16354 		return 0;
   16355 
   16356 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16357 		/* Request ME un-configure ULP mode in the PHY */
   16358 		reg = CSR_READ(sc, WMREG_H2ME);
   16359 		reg &= ~H2ME_ULP;
   16360 		reg |= H2ME_ENFORCE_SETTINGS;
   16361 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16362 
   16363 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16364 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16365 			if (i++ == 30) {
   16366 				device_printf(sc->sc_dev, "%s timed out\n",
   16367 				    __func__);
   16368 				return -1;
   16369 			}
   16370 			delay(10 * 1000);
   16371 		}
   16372 		reg = CSR_READ(sc, WMREG_H2ME);
   16373 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16374 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16375 
   16376 		return 0;
   16377 	}
   16378 
   16379 	/* Acquire semaphore */
   16380 	rv = sc->phy.acquire(sc);
   16381 	if (rv != 0) {
   16382 		DPRINTF(sc, WM_DEBUG_INIT,
   16383 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16384 		return rv;
   16385 	}
   16386 
   16387 	/* Toggle LANPHYPC */
   16388 	wm_toggle_lanphypc_pch_lpt(sc);
   16389 
   16390 	/* Unforce SMBus mode in PHY */
   16391 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16392 	if (rv != 0) {
   16393 		uint32_t reg2;
   16394 
   16395 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16396 		    __func__);
   16397 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16398 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16399 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16400 		delay(50 * 1000);
   16401 
   16402 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16403 		    &phyreg);
   16404 		if (rv != 0)
   16405 			goto release;
   16406 	}
   16407 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16408 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16409 
   16410 	/* Unforce SMBus mode in MAC */
   16411 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16412 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16413 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16414 
   16415 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16416 	if (rv != 0)
   16417 		goto release;
   16418 	phyreg |= HV_PM_CTRL_K1_ENA;
   16419 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16420 
   16421 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16422 	    &phyreg);
   16423 	if (rv != 0)
   16424 		goto release;
   16425 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16426 	    | I218_ULP_CONFIG1_STICKY_ULP
   16427 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16428 	    | I218_ULP_CONFIG1_WOL_HOST
   16429 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16430 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16431 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16432 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16433 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16434 	phyreg |= I218_ULP_CONFIG1_START;
   16435 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16436 
   16437 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16438 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16439 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16440 
   16441 release:
   16442 	/* Release semaphore */
   16443 	sc->phy.release(sc);
   16444 	wm_gmii_reset(sc);
   16445 	delay(50 * 1000);
   16446 
   16447 	return rv;
   16448 }
   16449 
   16450 /* WOL in the newer chipset interfaces (pchlan) */
   16451 static int
   16452 wm_enable_phy_wakeup(struct wm_softc *sc)
   16453 {
   16454 	device_t dev = sc->sc_dev;
   16455 	uint32_t mreg, moff;
   16456 	uint16_t wuce, wuc, wufc, preg;
   16457 	int i, rv;
   16458 
   16459 	KASSERT(sc->sc_type >= WM_T_PCH);
   16460 
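	/*
	 * In PHY wakeup mode the PHY itself filters the received packets,
	 * so the MAC's receive address registers and multicast table are
	 * mirrored into the PHY below.
	 */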
   16461 	/* Copy MAC RARs to PHY RARs */
   16462 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16463 
   16464 	/* Activate PHY wakeup */
   16465 	rv = sc->phy.acquire(sc);
   16466 	if (rv != 0) {
   16467 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16468 		    __func__);
   16469 		return rv;
   16470 	}
   16471 
   16472 	/*
   16473 	 * Enable access to PHY wakeup registers.
   16474 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16475 	 */
   16476 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16477 	if (rv != 0) {
   16478 		device_printf(dev,
   16479 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16480 		goto release;
   16481 	}
   16482 
   16483 	/* Copy MAC MTA to PHY MTA */
   16484 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16485 		uint16_t lo, hi;
   16486 
   16487 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16488 		lo = (uint16_t)(mreg & 0xffff);
   16489 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16490 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16491 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16492 	}
   16493 
   16494 	/* Configure PHY Rx Control register */
   16495 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16496 	mreg = CSR_READ(sc, WMREG_RCTL);
   16497 	if (mreg & RCTL_UPE)
   16498 		preg |= BM_RCTL_UPE;
   16499 	if (mreg & RCTL_MPE)
   16500 		preg |= BM_RCTL_MPE;
   16501 	preg &= ~(BM_RCTL_MO_MASK);
   16502 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16503 	if (moff != 0)
   16504 		preg |= moff << BM_RCTL_MO_SHIFT;
   16505 	if (mreg & RCTL_BAM)
   16506 		preg |= BM_RCTL_BAM;
   16507 	if (mreg & RCTL_PMCF)
   16508 		preg |= BM_RCTL_PMCF;
   16509 	mreg = CSR_READ(sc, WMREG_CTRL);
   16510 	if (mreg & CTRL_RFCE)
   16511 		preg |= BM_RCTL_RFCE;
   16512 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16513 
   16514 	wuc = WUC_APME | WUC_PME_EN;
   16515 	wufc = WUFC_MAG;
   16516 	/* Enable PHY wakeup in MAC register */
   16517 	CSR_WRITE(sc, WMREG_WUC,
   16518 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16519 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16520 
   16521 	/* Configure and enable PHY wakeup in PHY registers */
   16522 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16523 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16524 
   16525 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16526 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16527 
   16528 release:
   16529 	sc->phy.release(sc);
   16530 
	return rv;
   16532 }
   16533 
   16534 /* Power down workaround on D3 */
   16535 static void
   16536 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16537 {
   16538 	uint32_t reg;
   16539 	uint16_t phyreg;
   16540 	int i;
   16541 
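	/*
	 * Try at most twice: if the VR power-down mode does not take effect
	 * on the first pass, issue a PHY reset and repeat once.
	 */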
   16542 	for (i = 0; i < 2; i++) {
   16543 		/* Disable link */
   16544 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16545 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16546 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16547 
   16548 		/*
   16549 		 * Call gig speed drop workaround on Gig disable before
   16550 		 * accessing any PHY registers
   16551 		 */
   16552 		if (sc->sc_type == WM_T_ICH8)
   16553 			wm_gig_downshift_workaround_ich8lan(sc);
   16554 
   16555 		/* Write VR power-down enable */
   16556 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16557 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16558 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16559 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16560 
   16561 		/* Read it back and test */
   16562 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16563 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16564 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16565 			break;
   16566 
   16567 		/* Issue PHY reset and repeat at most one more time */
   16568 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16569 	}
   16570 }
   16571 
   16572 /*
   16573  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16574  *  @sc: pointer to the HW structure
   16575  *
   16576  *  During S0 to Sx transition, it is possible the link remains at gig
   16577  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16578  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16579  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16580  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16581  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   16583  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16584  *  than 10Mbps w/o EEE.
   16585  */
   16586 static void
   16587 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16588 {
   16589 	device_t dev = sc->sc_dev;
   16590 	struct ethercom *ec = &sc->sc_ethercom;
   16591 	uint32_t phy_ctrl;
   16592 	int rv;
   16593 
   16594 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16595 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16596 
   16597 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_TGP));
   16598 
   16599 	if (sc->sc_phytype == WMPHY_I217) {
   16600 		uint16_t devid = sc->sc_pcidevid;
   16601 
   16602 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16603 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16604 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16605 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16606 		    (sc->sc_type >= WM_T_PCH_SPT))
   16607 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16608 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16609 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16610 
   16611 		if (sc->phy.acquire(sc) != 0)
   16612 			goto out;
   16613 
   16614 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16615 			uint16_t eee_advert;
   16616 
   16617 			rv = wm_read_emi_reg_locked(dev,
   16618 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16619 			if (rv)
   16620 				goto release;
   16621 
   16622 			/*
   16623 			 * Disable LPLU if both link partners support 100BaseT
   16624 			 * EEE and 100Full is advertised on both ends of the
   16625 			 * link, and enable Auto Enable LPI since there will
   16626 			 * be no driver to enable LPI while in Sx.
   16627 			 */
   16628 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16629 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16630 				uint16_t anar, phy_reg;
   16631 
   16632 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16633 				    &anar);
   16634 				if (anar & ANAR_TX_FD) {
   16635 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16636 					    PHY_CTRL_NOND0A_LPLU);
   16637 
   16638 					/* Set Auto Enable LPI after link up */
   16639 					sc->phy.readreg_locked(dev, 2,
   16640 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16641 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16642 					sc->phy.writereg_locked(dev, 2,
   16643 					    I217_LPI_GPIO_CTRL, phy_reg);
   16644 				}
   16645 			}
   16646 		}
   16647 
		/*
		 * For i217 Intel Rapid Start Technology support, when the
		 * system is going into Sx and no manageability engine is
		 * present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset
		 * only on power good, as well as the MTA (Multicast table
		 * array).  The SMBus release must also be disabled on LCD
		 * reset, and the MTA must be enabled to reset for Intel
		 * Rapid Start Technology support.
		 *
		 * XXX None of this is implemented here yet.
		 */
   16661 
   16662 release:
   16663 		sc->phy.release(sc);
   16664 	}
   16665 out:
   16666 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16667 
   16668 	if (sc->sc_type == WM_T_ICH8)
   16669 		wm_gig_downshift_workaround_ich8lan(sc);
   16670 
   16671 	if (sc->sc_type >= WM_T_PCH) {
   16672 		wm_oem_bits_config_ich8lan(sc, false);
   16673 
   16674 		/* Reset PHY to activate OEM bits on 82577/8 */
   16675 		if (sc->sc_type == WM_T_PCH)
   16676 			wm_reset_phy(sc);
   16677 
   16678 		if (sc->phy.acquire(sc) != 0)
   16679 			return;
   16680 		wm_write_smbus_addr(sc);
   16681 		sc->phy.release(sc);
   16682 	}
   16683 }
   16684 
   16685 /*
   16686  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16687  *  @sc: pointer to the HW structure
   16688  *
   16689  *  During Sx to S0 transitions on non-managed devices or managed devices
   16690  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16691  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16692  *  the PHY.
   16693  *  On i217, setup Intel Rapid Start Technology.
   16694  */
   16695 static int
   16696 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16697 {
   16698 	device_t dev = sc->sc_dev;
   16699 	int rv;
   16700 
   16701 	if (sc->sc_type < WM_T_PCH2)
   16702 		return 0;
   16703 
   16704 	rv = wm_init_phy_workarounds_pchlan(sc);
   16705 	if (rv != 0)
   16706 		return rv;
   16707 
	/* For i217 Intel Rapid Start Technology support, when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   16713 	if (sc->sc_phytype == WMPHY_I217) {
   16714 		uint16_t phy_reg;
   16715 
   16716 		rv = sc->phy.acquire(sc);
   16717 		if (rv != 0)
   16718 			return rv;
   16719 
   16720 		/* Clear Auto Enable LPI after link up */
   16721 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16722 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16723 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16724 
   16725 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16726 			/* Restore clear on SMB if no manageability engine
   16727 			 * is present
   16728 			 */
   16729 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16730 			    &phy_reg);
   16731 			if (rv != 0)
   16732 				goto release;
   16733 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16734 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16735 
   16736 			/* Disable Proxy */
   16737 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16738 		}
   16739 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16741 		if (rv != 0)
   16742 			goto release;
   16743 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16744 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16745 
   16746 release:
   16747 		sc->phy.release(sc);
   16748 		return rv;
   16749 	}
   16750 
   16751 	return 0;
   16752 }
   16753 
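/*
 * Arm the hardware for wakeup: program WUC/WUFC (or the PHY wakeup
 * registers on PCH and newer), apply the suspend workarounds, and set
 * PME enable in the PCI power management registers accordingly.
 */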
   16754 static void
   16755 wm_enable_wakeup(struct wm_softc *sc)
   16756 {
   16757 	uint32_t reg, pmreg;
   16758 	pcireg_t pmode;
   16759 	int rv = 0;
   16760 
   16761 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16762 		device_xname(sc->sc_dev), __func__));
   16763 
   16764 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16765 	    &pmreg, NULL) == 0)
   16766 		return;
   16767 
   16768 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16769 		goto pme;
   16770 
   16771 	/* Advertise the wakeup capability */
   16772 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16773 	    | CTRL_SWDPIN(3));
   16774 
   16775 	/* Keep the laser running on fiber adapters */
   16776 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16777 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16778 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16779 		reg |= CTRL_EXT_SWDPIN(3);
   16780 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16781 	}
   16782 
   16783 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16784 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16785 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16786 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) ||
   16787 	    (sc->sc_type == WM_T_PCH_TGP))
   16788 		wm_suspend_workarounds_ich8lan(sc);
   16789 
   16790 #if 0	/* For the multicast packet */
   16791 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16792 	reg |= WUFC_MC;
   16793 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16794 #endif
   16795 
   16796 	if (sc->sc_type >= WM_T_PCH) {
   16797 		rv = wm_enable_phy_wakeup(sc);
   16798 		if (rv != 0)
   16799 			goto pme;
   16800 	} else {
   16801 		/* Enable wakeup by the MAC */
   16802 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16803 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16804 	}
   16805 
   16806 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16807 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16808 		|| (sc->sc_type == WM_T_PCH2))
   16809 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16810 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16811 
   16812 pme:
   16813 	/* Request PME */
   16814 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16815 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16816 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16817 		/* For WOL */
   16818 		pmode |= PCI_PMCSR_PME_EN;
   16819 	} else {
   16820 		/* Disable WOL */
   16821 		pmode &= ~PCI_PMCSR_PME_EN;
   16822 	}
   16823 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16824 }
   16825 
   16826 /* Disable ASPM L0s and/or L1 for workaround */
   16827 static void
   16828 wm_disable_aspm(struct wm_softc *sc)
   16829 {
   16830 	pcireg_t reg, mask = 0;
	const char *str = "";
   16832 
	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
	 */
   16837 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16838 		return;
   16839 
   16840 	switch (sc->sc_type) {
   16841 	case WM_T_82571:
   16842 	case WM_T_82572:
   16843 		/*
   16844 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16845 		 * State Power management L1 State (ASPM L1).
   16846 		 */
   16847 		mask = PCIE_LCSR_ASPM_L1;
   16848 		str = "L1 is";
   16849 		break;
   16850 	case WM_T_82573:
   16851 	case WM_T_82574:
   16852 	case WM_T_82583:
   16853 		/*
   16854 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16855 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s with those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   16860 		 *
   16861 		 * References:
   16862 		 * Errata 8 of the Specification Update of i82573.
   16863 		 * Errata 20 of the Specification Update of i82574.
   16864 		 * Errata 9 of the Specification Update of i82583.
   16865 		 */
   16866 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16867 		str = "L0s and L1 are";
   16868 		break;
   16869 	default:
   16870 		return;
   16871 	}
   16872 
   16873 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16874 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16875 	reg &= ~mask;
   16876 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16877 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16878 
   16879 	/* Print only in wm_attach() */
   16880 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16881 		aprint_verbose_dev(sc->sc_dev,
		    "ASPM %s disabled to work around the errata.\n", str);
   16883 }
   16884 
   16885 /* LPLU */
   16886 
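/*
 * Disable D0 Low Power Link Up.  Where the LPLU control lives differs by
 * generation: an IGP PHY register, the PHPM register, the PHY_CTRL
 * register, or the HV OEM bits PHY register.
 */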
   16887 static void
   16888 wm_lplu_d0_disable(struct wm_softc *sc)
   16889 {
   16890 	struct mii_data *mii = &sc->sc_mii;
   16891 	uint32_t reg;
   16892 	uint16_t phyval;
   16893 
   16894 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16895 		device_xname(sc->sc_dev), __func__));
   16896 
   16897 	if (sc->sc_phytype == WMPHY_IFE)
   16898 		return;
   16899 
   16900 	switch (sc->sc_type) {
   16901 	case WM_T_82571:
   16902 	case WM_T_82572:
   16903 	case WM_T_82573:
   16904 	case WM_T_82575:
   16905 	case WM_T_82576:
   16906 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16907 		phyval &= ~PMR_D0_LPLU;
   16908 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16909 		break;
   16910 	case WM_T_82580:
   16911 	case WM_T_I350:
   16912 	case WM_T_I210:
   16913 	case WM_T_I211:
   16914 		reg = CSR_READ(sc, WMREG_PHPM);
   16915 		reg &= ~PHPM_D0A_LPLU;
   16916 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16917 		break;
   16918 	case WM_T_82574:
   16919 	case WM_T_82583:
   16920 	case WM_T_ICH8:
   16921 	case WM_T_ICH9:
   16922 	case WM_T_ICH10:
   16923 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16924 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16925 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16926 		CSR_WRITE_FLUSH(sc);
   16927 		break;
   16928 	case WM_T_PCH:
   16929 	case WM_T_PCH2:
   16930 	case WM_T_PCH_LPT:
   16931 	case WM_T_PCH_SPT:
   16932 	case WM_T_PCH_CNP:
   16933 	case WM_T_PCH_TGP:
   16934 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16935 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16936 		if (wm_phy_resetisblocked(sc) == false)
   16937 			phyval |= HV_OEM_BITS_ANEGNOW;
   16938 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16939 		break;
   16940 	default:
   16941 		break;
   16942 	}
   16943 }
   16944 
   16945 /* EEE */
   16946 
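/*
 * Enable/disable Energy Efficient Ethernet on I350-class (copper only)
 * devices according to the ETHERCAP_EEE setting, via the IPCNFG and
 * EEER registers.
 */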
   16947 static int
   16948 wm_set_eee_i350(struct wm_softc *sc)
   16949 {
   16950 	struct ethercom *ec = &sc->sc_ethercom;
   16951 	uint32_t ipcnfg, eeer;
   16952 	uint32_t ipcnfg_mask
   16953 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16954 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16955 
   16956 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16957 
   16958 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16959 	eeer = CSR_READ(sc, WMREG_EEER);
   16960 
   16961 	/* Enable or disable per user setting */
   16962 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16963 		ipcnfg |= ipcnfg_mask;
   16964 		eeer |= eeer_mask;
   16965 	} else {
   16966 		ipcnfg &= ~ipcnfg_mask;
   16967 		eeer &= ~eeer_mask;
   16968 	}
   16969 
   16970 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16971 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16972 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16973 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16974 
   16975 	return 0;
   16976 }
   16977 
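/*
 * Enable/disable EEE on 82579 and I217-class PHYs.  Unlike on I350, the
 * EEE controls live in PHY (EMI) registers, so the PHY semaphore must be
 * taken and the link partner's advertised abilities consulted.
 */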
   16978 static int
   16979 wm_set_eee_pchlan(struct wm_softc *sc)
   16980 {
   16981 	device_t dev = sc->sc_dev;
   16982 	struct ethercom *ec = &sc->sc_ethercom;
   16983 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16984 	int rv;
   16985 
   16986 	switch (sc->sc_phytype) {
   16987 	case WMPHY_82579:
   16988 		lpa = I82579_EEE_LP_ABILITY;
   16989 		pcs_status = I82579_EEE_PCS_STATUS;
   16990 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16991 		break;
   16992 	case WMPHY_I217:
   16993 		lpa = I217_EEE_LP_ABILITY;
   16994 		pcs_status = I217_EEE_PCS_STATUS;
   16995 		adv_addr = I217_EEE_ADVERTISEMENT;
   16996 		break;
   16997 	default:
   16998 		return 0;
   16999 	}
   17000 
   17001 	rv = sc->phy.acquire(sc);
   17002 	if (rv != 0) {
   17003 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   17004 		return rv;
   17005 	}
   17006 
   17007 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   17008 	if (rv != 0)
   17009 		goto release;
   17010 
   17011 	/* Clear bits that enable EEE in various speeds */
   17012 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   17013 
   17014 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   17015 		/* Save off link partner's EEE ability */
   17016 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   17017 		if (rv != 0)
   17018 			goto release;
   17019 
   17020 		/* Read EEE advertisement */
   17021 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   17022 			goto release;
   17023 
   17024 		/*
   17025 		 * Enable EEE only for speeds in which the link partner is
   17026 		 * EEE capable and for which we advertise EEE.
   17027 		 */
   17028 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   17029 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   17030 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   17031 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   17032 			if ((data & ANLPAR_TX_FD) != 0)
   17033 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   17034 			else {
   17035 				/*
   17036 				 * EEE is not supported in 100Half, so ignore
   17037 				 * partner's EEE in 100 ability if full-duplex
   17038 				 * is not advertised.
   17039 				 */
   17040 				sc->eee_lp_ability
   17041 				    &= ~AN_EEEADVERT_100_TX;
   17042 			}
   17043 		}
   17044 	}
   17045 
   17046 	if (sc->sc_phytype == WMPHY_82579) {
   17047 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   17048 		if (rv != 0)
   17049 			goto release;
   17050 
   17051 		data &= ~I82579_LPI_PLL_SHUT_100;
   17052 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   17053 	}
   17054 
   17055 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   17056 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   17057 		goto release;
   17058 
   17059 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   17060 release:
   17061 	sc->phy.release(sc);
   17062 
   17063 	return rv;
   17064 }
   17065 
   17066 static int
   17067 wm_set_eee(struct wm_softc *sc)
   17068 {
   17069 	struct ethercom *ec = &sc->sc_ethercom;
   17070 
   17071 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   17072 		return 0;
   17073 
   17074 	if (sc->sc_type == WM_T_I354) {
   17075 		/* I354 uses an external PHY */
   17076 		return 0; /* not yet */
   17077 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   17078 		return wm_set_eee_i350(sc);
   17079 	else if (sc->sc_type >= WM_T_PCH2)
   17080 		return wm_set_eee_pchlan(sc);
   17081 
   17082 	return 0;
   17083 }
   17084 
   17085 /*
   17086  * Workarounds (mainly PHY related).
   17087  * Basically, PHY's workarounds are in the PHY drivers.
   17088  */
   17089 
   17090 /* Workaround for 82566 Kumeran PCS lock loss */
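/*
 * If the PCS lock is repeatedly lost while the link is up at 1G, reset
 * the PHY (up to 10 times); if the diagnostic bit never clears, disable
 * gigabit operation entirely via PHY_CTRL.
 */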
   17091 static int
   17092 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   17093 {
   17094 	struct mii_data *mii = &sc->sc_mii;
   17095 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17096 	int i, reg, rv;
   17097 	uint16_t phyreg;
   17098 
   17099 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17100 		device_xname(sc->sc_dev), __func__));
   17101 
   17102 	/* If the link is not up, do nothing */
   17103 	if ((status & STATUS_LU) == 0)
   17104 		return 0;
   17105 
   17106 	/* Nothing to do if the link is other than 1Gbps */
   17107 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   17108 		return 0;
   17109 
   17110 	for (i = 0; i < 10; i++) {
		/* Read twice (XXX presumably to flush a stale latched value) */
   17112 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17113 		if (rv != 0)
   17114 			return rv;
   17115 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17116 		if (rv != 0)
   17117 			return rv;
   17118 
   17119 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   17120 			goto out;	/* GOOD! */
   17121 
   17122 		/* Reset the PHY */
   17123 		wm_reset_phy(sc);
   17124 		delay(5*1000);
   17125 	}
   17126 
   17127 	/* Disable GigE link negotiation */
   17128 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   17129 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   17130 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   17131 
   17132 	/*
   17133 	 * Call gig speed drop workaround on Gig disable before accessing
   17134 	 * any PHY registers.
   17135 	 */
   17136 	wm_gig_downshift_workaround_ich8lan(sc);
   17137 
   17138 out:
   17139 	return 0;
   17140 }
   17141 
   17142 /*
   17143  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   17144  *  @sc: pointer to the HW structure
   17145  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   17147  *  LPLU, Gig disable, MDIC PHY reset):
   17148  *    1) Set Kumeran Near-end loopback
   17149  *    2) Clear Kumeran Near-end loopback
   17150  *  Should only be called for ICH8[m] devices with any 1G Phy.
   17151  */
   17152 static void
   17153 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   17154 {
   17155 	uint16_t kmreg;
   17156 
   17157 	/* Only for igp3 */
   17158 	if (sc->sc_phytype == WMPHY_IGP_3) {
   17159 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   17160 			return;
   17161 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   17162 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   17163 			return;
   17164 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   17165 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   17166 	}
   17167 }
   17168 
   17169 /*
   17170  * Workaround for pch's PHYs
   17171  * XXX should be moved to new PHY driver?
   17172  */
   17173 static int
   17174 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17175 {
   17176 	device_t dev = sc->sc_dev;
   17177 	struct mii_data *mii = &sc->sc_mii;
   17178 	struct mii_softc *child;
   17179 	uint16_t phy_data, phyrev = 0;
   17180 	int phytype = sc->sc_phytype;
   17181 	int rv;
   17182 
   17183 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17184 		device_xname(dev), __func__));
   17185 	KASSERT(sc->sc_type == WM_T_PCH);
   17186 
   17187 	/* Set MDIO slow mode before any other MDIO access */
   17188 	if (phytype == WMPHY_82577)
   17189 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17190 			return rv;
   17191 
   17192 	child = LIST_FIRST(&mii->mii_phys);
   17193 	if (child != NULL)
   17194 		phyrev = child->mii_mpd_rev;
   17195 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   17197 	if ((child != NULL) &&
   17198 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17199 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17200 		/* Disable generation of early preamble (0x4431) */
   17201 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17202 		    &phy_data);
   17203 		if (rv != 0)
   17204 			return rv;
   17205 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17206 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17207 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17208 		    phy_data);
   17209 		if (rv != 0)
   17210 			return rv;
   17211 
   17212 		/* Preamble tuning for SSC */
   17213 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17214 		if (rv != 0)
   17215 			return rv;
   17216 	}
   17217 
   17218 	/* 82578 */
   17219 	if (phytype == WMPHY_82578) {
   17220 		/*
   17221 		 * Return registers to default by doing a soft reset then
   17222 		 * writing 0x3140 to the control register
   17223 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17224 		 */
   17225 		if ((child != NULL) && (phyrev < 2)) {
   17226 			PHY_RESET(child);
   17227 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17228 			if (rv != 0)
   17229 				return rv;
   17230 		}
   17231 	}
   17232 
   17233 	/* Select page 0 */
   17234 	if ((rv = sc->phy.acquire(sc)) != 0)
   17235 		return rv;
   17236 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17237 	sc->phy.release(sc);
   17238 	if (rv != 0)
   17239 		return rv;
   17240 
   17241 	/*
   17242 	 * Configure the K1 Si workaround during phy reset assuming there is
   17243 	 * link so that it disables K1 if link is in 1Gbps.
   17244 	 */
   17245 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17246 		return rv;
   17247 
   17248 	/* Workaround for link disconnects on a busy hub in half duplex */
   17249 	rv = sc->phy.acquire(sc);
   17250 	if (rv)
   17251 		return rv;
   17252 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17253 	if (rv)
   17254 		goto release;
   17255 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17256 	    phy_data & 0x00ff);
   17257 	if (rv)
   17258 		goto release;
   17259 
   17260 	/* Set MSE higher to enable link to stay up when noise is high */
   17261 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17262 release:
   17263 	sc->phy.release(sc);
   17264 
   17265 	return rv;
   17266 }
   17267 
   17268 /*
   17269  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17270  *  @sc:   pointer to the HW structure
   17271  */
   17272 static void
   17273 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17274 {
   17275 
   17276 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17277 		device_xname(sc->sc_dev), __func__));
   17278 
   17279 	if (sc->phy.acquire(sc) != 0)
   17280 		return;
   17281 
   17282 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17283 
   17284 	sc->phy.release(sc);
   17285 }
   17286 
   17287 static void
   17288 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17289 {
   17290 	device_t dev = sc->sc_dev;
   17291 	uint32_t mac_reg;
   17292 	uint16_t i, wuce;
   17293 	int count;
   17294 
   17295 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17296 		device_xname(dev), __func__));
   17297 
   17298 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17299 		return;
   17300 
   17301 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17302 	count = wm_rar_count(sc);
   17303 	for (i = 0; i < count; i++) {
   17304 		uint16_t lo, hi;
   17305 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17306 		lo = (uint16_t)(mac_reg & 0xffff);
   17307 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17308 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17309 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17310 
   17311 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17312 		lo = (uint16_t)(mac_reg & 0xffff);
   17313 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17314 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17315 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17316 	}
   17317 
   17318 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17319 }
   17320 
   17321 /*
   17322  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17323  *  with 82579 PHY
   17324  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17325  */
   17326 static int
   17327 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17328 {
   17329 	device_t dev = sc->sc_dev;
   17330 	int rar_count;
   17331 	int rv;
   17332 	uint32_t mac_reg;
   17333 	uint16_t dft_ctrl, data;
   17334 	uint16_t i;
   17335 
   17336 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17337 		device_xname(dev), __func__));
   17338 
   17339 	if (sc->sc_type < WM_T_PCH2)
   17340 		return 0;
   17341 
   17342 	/* Acquire PHY semaphore */
   17343 	rv = sc->phy.acquire(sc);
   17344 	if (rv != 0)
   17345 		return rv;
   17346 
   17347 	/* Disable Rx path while enabling/disabling workaround */
   17348 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17349 	if (rv != 0)
   17350 		goto out;
   17351 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17352 	    dft_ctrl | (1 << 14));
   17353 	if (rv != 0)
   17354 		goto out;
   17355 
   17356 	if (enable) {
   17357 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17358 		 * SHRAL/H) and initial CRC values to the MAC
   17359 		 */
   17360 		rar_count = wm_rar_count(sc);
   17361 		for (i = 0; i < rar_count; i++) {
   17362 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17363 			uint32_t addr_high, addr_low;
   17364 
   17365 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17366 			if (!(addr_high & RAL_AV))
   17367 				continue;
   17368 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17369 			mac_addr[0] = (addr_low & 0xFF);
   17370 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17371 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17372 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17373 			mac_addr[4] = (addr_high & 0xFF);
   17374 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17375 
   17376 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17377 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17378 		}
   17379 
   17380 		/* Write Rx addresses to the PHY */
   17381 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17382 	}
   17383 
   17384 	/*
   17385 	 * If enable ==
   17386 	 *	true: Enable jumbo frame workaround in the MAC.
   17387 	 *	false: Write MAC register values back to h/w defaults.
   17388 	 */
   17389 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17390 	if (enable) {
   17391 		mac_reg &= ~(1 << 14);
   17392 		mac_reg |= (7 << 15);
   17393 	} else
   17394 		mac_reg &= ~(0xf << 14);
   17395 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17396 
   17397 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17398 	if (enable) {
   17399 		mac_reg |= RCTL_SECRC;
   17400 		sc->sc_rctl |= RCTL_SECRC;
   17401 		sc->sc_flags |= WM_F_CRC_STRIP;
   17402 	} else {
   17403 		mac_reg &= ~RCTL_SECRC;
   17404 		sc->sc_rctl &= ~RCTL_SECRC;
   17405 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17406 	}
   17407 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17408 
   17409 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17410 	if (rv != 0)
   17411 		goto out;
   17412 	if (enable)
   17413 		data |= 1 << 0;
   17414 	else
   17415 		data &= ~(1 << 0);
   17416 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17417 	if (rv != 0)
   17418 		goto out;
   17419 
   17420 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17421 	if (rv != 0)
   17422 		goto out;
   17423 	/*
	 * XXX FreeBSD and Linux do the same thing: they set the same value
	 * in both the enable case and the disable case. Is that correct?
   17426 	 */
   17427 	data &= ~(0xf << 8);
   17428 	data |= (0xb << 8);
   17429 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17430 	if (rv != 0)
   17431 		goto out;
   17432 
   17433 	/*
   17434 	 * If enable ==
   17435 	 *	true: Enable jumbo frame workaround in the PHY.
   17436 	 *	false: Write PHY register values back to h/w defaults.
   17437 	 */
   17438 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17439 	if (rv != 0)
   17440 		goto out;
   17441 	data &= ~(0x7F << 5);
   17442 	if (enable)
   17443 		data |= (0x37 << 5);
   17444 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17445 	if (rv != 0)
   17446 		goto out;
   17447 
   17448 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17449 	if (rv != 0)
   17450 		goto out;
   17451 	if (enable)
   17452 		data &= ~(1 << 13);
   17453 	else
   17454 		data |= (1 << 13);
   17455 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17456 	if (rv != 0)
   17457 		goto out;
   17458 
   17459 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17460 	if (rv != 0)
   17461 		goto out;
   17462 	data &= ~(0x3FF << 2);
   17463 	if (enable)
   17464 		data |= (I82579_TX_PTR_GAP << 2);
   17465 	else
   17466 		data |= (0x8 << 2);
   17467 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17468 	if (rv != 0)
   17469 		goto out;
   17470 
   17471 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17472 	    enable ? 0xf100 : 0x7e00);
   17473 	if (rv != 0)
   17474 		goto out;
   17475 
   17476 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17477 	if (rv != 0)
   17478 		goto out;
   17479 	if (enable)
   17480 		data |= 1 << 10;
   17481 	else
   17482 		data &= ~(1 << 10);
   17483 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17484 	if (rv != 0)
   17485 		goto out;
   17486 
   17487 	/* Re-enable Rx path after enabling/disabling workaround */
   17488 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17489 	    dft_ctrl & ~(1 << 14));
   17490 
   17491 out:
   17492 	sc->phy.release(sc);
   17493 
   17494 	return rv;
   17495 }
   17496 
   17497 /*
    17498  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
    17499  *  applied after every PHY reset.
   17500  */
   17501 static int
   17502 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17503 {
   17504 	device_t dev = sc->sc_dev;
   17505 	int rv;
   17506 
   17507 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17508 		device_xname(dev), __func__));
   17509 	KASSERT(sc->sc_type == WM_T_PCH2);
   17510 
   17511 	/* Set MDIO slow mode before any other MDIO access */
   17512 	rv = wm_set_mdio_slow_mode_hv(sc);
   17513 	if (rv != 0)
   17514 		return rv;
   17515 
   17516 	rv = sc->phy.acquire(sc);
   17517 	if (rv != 0)
   17518 		return rv;
    17519 	/* Set the MSE threshold higher so the link stays up when noise is high */
   17520 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17521 	if (rv != 0)
   17522 		goto release;
    17523 	/* Drop the link after the MSE threshold has been reached 5 times */
   17524 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17525 release:
   17526 	sc->phy.release(sc);
   17527 
   17528 	return rv;
   17529 }
   17530 
   17531 /**
   17532  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17533  *  @link: link up bool flag
   17534  *
    17535  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
    17536  *  indications, preventing further DMA write requests.  Work around the
    17537  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   17538  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17539  *  speeds in order to avoid Tx hangs.
   17540  **/
   17541 static int
   17542 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17543 {
   17544 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17545 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17546 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17547 	uint16_t phyreg;
   17548 
   17549 	if (link && (speed == STATUS_SPEED_1000)) {
   17550 		int rv;
   17551 
   17552 		rv = sc->phy.acquire(sc);
   17553 		if (rv != 0)
   17554 			return rv;
   17555 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17556 		    &phyreg);
   17557 		if (rv != 0)
   17558 			goto release;
   17559 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17560 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17561 		if (rv != 0)
   17562 			goto release;
   17563 		delay(20);
   17564 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17565 
   17566 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17567 		    &phyreg);
   17568 release:
   17569 		sc->phy.release(sc);
   17570 		return rv;
   17571 	}
   17572 
   17573 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17574 
   17575 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17576 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17577 	    || !link
   17578 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17579 		goto update_fextnvm6;
   17580 
   17581 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17582 
   17583 	/* Clear link status transmit timeout */
   17584 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17585 	if (speed == STATUS_SPEED_100) {
   17586 		/* Set inband Tx timeout to 5x10us for 100Half */
   17587 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17588 
   17589 		/* Do not extend the K1 entry latency for 100Half */
   17590 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17591 	} else {
   17592 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17593 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17594 
   17595 		/* Extend the K1 entry latency for 10 Mbps */
   17596 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17597 	}
   17598 
   17599 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17600 
   17601 update_fextnvm6:
   17602 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17603 	return 0;
   17604 }
   17605 
   17606 /*
   17607  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17608  *  @sc:   pointer to the HW structure
   17609  *  @link: link up bool flag
   17610  *
   17611  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17612  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17613  *  If link is down, the function will restore the default K1 setting located
   17614  *  in the NVM.
   17615  */
   17616 static int
   17617 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17618 {
   17619 	int k1_enable = sc->sc_nvm_k1_enabled;
   17620 	int rv;
   17621 
   17622 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17623 		device_xname(sc->sc_dev), __func__));
   17624 
   17625 	rv = sc->phy.acquire(sc);
   17626 	if (rv != 0)
   17627 		return rv;
   17628 
   17629 	if (link) {
   17630 		k1_enable = 0;
   17631 
   17632 		/* Link stall fix for link up */
   17633 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17634 		    0x0100);
   17635 	} else {
   17636 		/* Link stall fix for link down */
   17637 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17638 		    0x4100);
   17639 	}
   17640 
   17641 	wm_configure_k1_ich8lan(sc, k1_enable);
   17642 	sc->phy.release(sc);
   17643 
   17644 	return 0;
   17645 }
   17646 
   17647 /*
   17648  *  wm_k1_workaround_lv - K1 Si workaround
   17649  *  @sc:   pointer to the HW structure
   17650  *
    17651  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
    17652  *  Disable K1 for 1000 and 100 speeds.
   17653  */
   17654 static int
   17655 wm_k1_workaround_lv(struct wm_softc *sc)
   17656 {
   17657 	uint32_t reg;
   17658 	uint16_t phyreg;
   17659 	int rv;
   17660 
   17661 	if (sc->sc_type != WM_T_PCH2)
   17662 		return 0;
   17663 
   17664 	/* Set K1 beacon duration based on 10Mbps speed */
   17665 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17666 	if (rv != 0)
   17667 		return rv;
   17668 
   17669 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17670 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17671 		if (phyreg &
   17672 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17673 			/* LV 1G/100 packet drop issue workaround */
   17674 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17675 			    &phyreg);
   17676 			if (rv != 0)
   17677 				return rv;
   17678 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17679 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17680 			    phyreg);
   17681 			if (rv != 0)
   17682 				return rv;
   17683 		} else {
   17684 			/* For 10Mbps */
   17685 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17686 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17687 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17688 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17689 		}
   17690 	}
   17691 
   17692 	return 0;
   17693 }
   17694 
   17695 /*
   17696  *  wm_link_stall_workaround_hv - Si workaround
   17697  *  @sc: pointer to the HW structure
   17698  *
   17699  *  This function works around a Si bug where the link partner can get
   17700  *  a link up indication before the PHY does. If small packets are sent
    17701  *  by the link partner, they can be placed in the packet buffer without
    17702  *  being properly accounted for by the PHY and will stall, preventing
   17703  *  further packets from being received.  The workaround is to clear the
   17704  *  packet buffer after the PHY detects link up.
   17705  */
   17706 static int
   17707 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17708 {
   17709 	uint16_t phyreg;
   17710 
   17711 	if (sc->sc_phytype != WMPHY_82578)
   17712 		return 0;
   17713 
    17714 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
   17715 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17716 	if ((phyreg & BMCR_LOOP) != 0)
   17717 		return 0;
   17718 
   17719 	/* Check if link is up and at 1Gbps */
   17720 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17721 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17722 	    | BM_CS_STATUS_SPEED_MASK;
   17723 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17724 		| BM_CS_STATUS_SPEED_1000))
   17725 		return 0;
   17726 
   17727 	delay(200 * 1000);	/* XXX too big */
   17728 
    17729 	/* Flush the packets in the FIFO buffer */
   17730 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17731 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17732 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17733 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17734 
   17735 	return 0;
   17736 }
   17737 
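/*
 *  wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
 *  @sc: pointer to the HW structure
 *
 *  Acquire the PHY semaphore and set the HV_KMRN_MDIO_SLOW bit in the
 *  KMRN mode control register so that subsequent MDIO accesses are
 *  performed in slow mode.
 */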
   17738 static int
   17739 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17740 {
   17741 	int rv;
   17742 
   17743 	rv = sc->phy.acquire(sc);
   17744 	if (rv != 0) {
   17745 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17746 		    __func__);
   17747 		return rv;
   17748 	}
   17749 
   17750 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17751 
   17752 	sc->phy.release(sc);
   17753 
   17754 	return rv;
   17755 }
   17756 
   17757 static int
   17758 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17759 {
   17760 	int rv;
   17761 	uint16_t reg;
   17762 
   17763 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17764 	if (rv != 0)
   17765 		return rv;
   17766 
   17767 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17768 	    reg | HV_KMRN_MDIO_SLOW);
   17769 }
   17770 
   17771 /*
   17772  *  wm_configure_k1_ich8lan - Configure K1 power state
   17773  *  @sc: pointer to the HW structure
   17774  *  @enable: K1 state to configure
   17775  *
   17776  *  Configure the K1 power state based on the provided parameter.
   17777  *  Assumes semaphore already acquired.
   17778  */
   17779 static void
   17780 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17781 {
   17782 	uint32_t ctrl, ctrl_ext, tmp;
   17783 	uint16_t kmreg;
   17784 	int rv;
   17785 
   17786 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17787 
   17788 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17789 	if (rv != 0)
   17790 		return;
   17791 
   17792 	if (k1_enable)
   17793 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17794 	else
   17795 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17796 
   17797 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17798 	if (rv != 0)
   17799 		return;
   17800 
   17801 	delay(20);
   17802 
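	/*
	 * Briefly force the MAC speed with the PHY speed bypass set, then
	 * restore the original CTRL/CTRL_EXT values.  This appears to be
	 * needed for the new K1 setting to take effect.
	 */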
   17803 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17804 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17805 
   17806 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17807 	tmp |= CTRL_FRCSPD;
   17808 
   17809 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17810 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17811 	CSR_WRITE_FLUSH(sc);
   17812 	delay(20);
   17813 
   17814 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17815 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17816 	CSR_WRITE_FLUSH(sc);
   17817 	delay(20);
   17818 
   17819 	return;
   17820 }
   17821 
    17822 /* Special case - the 82575 needs manual initialization ... */
   17823 static void
   17824 wm_reset_init_script_82575(struct wm_softc *sc)
   17825 {
    17826 	/*
    17827 	 * Remark: this is untested code - we have no board without an EEPROM.
    17828 	 * It's the same setup as mentioned in the FreeBSD driver for the i82575.
    17829 	 */
   17830 
   17831 	/* SerDes configuration via SERDESCTRL */
   17832 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17833 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17834 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17835 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17836 
   17837 	/* CCM configuration via CCMCTL register */
   17838 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17839 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17840 
   17841 	/* PCIe lanes configuration */
   17842 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17843 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17844 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17845 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17846 
   17847 	/* PCIe PLL Configuration */
   17848 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17849 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17850 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17851 }
   17852 
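/*
 *  wm_reset_mdicnfg_82580 - Reprogram MDICNFG
 *  @sc: pointer to the HW structure
 *
 *  For an 82580 in SGMII mode, re-apply the external and shared MDIO
 *  settings from the CFG3 word in the NVM to the MDICNFG register.
 */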
   17853 static void
   17854 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17855 {
   17856 	uint32_t reg;
   17857 	uint16_t nvmword;
   17858 	int rv;
   17859 
   17860 	if (sc->sc_type != WM_T_82580)
   17861 		return;
   17862 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17863 		return;
   17864 
   17865 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17866 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17867 	if (rv != 0) {
   17868 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17869 		    __func__);
   17870 		return;
   17871 	}
   17872 
   17873 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17874 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17875 		reg |= MDICNFG_DEST;
   17876 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17877 		reg |= MDICNFG_COM_MDIO;
   17878 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17879 }
   17880 
   17881 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17882 
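/*
 *  wm_phy_is_accessible_pchlan - Check whether the PHY is accessible
 *  @sc: pointer to the HW structure
 *
 *  Try to read the PHY ID registers, retrying in MDIO slow mode if
 *  necessary.  On PCH_LPT and newer, un-force SMBus mode in both the PHY
 *  and the MAC once the PHY responds, but only if ME is not active.
 */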
   17883 static bool
   17884 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17885 {
   17886 	uint32_t reg;
   17887 	uint16_t id1, id2;
   17888 	int i, rv;
   17889 
   17890 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17891 		device_xname(sc->sc_dev), __func__));
   17892 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17893 
   17894 	id1 = id2 = 0xffff;
   17895 	for (i = 0; i < 2; i++) {
   17896 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17897 		    &id1);
   17898 		if ((rv != 0) || MII_INVALIDID(id1))
   17899 			continue;
   17900 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17901 		    &id2);
   17902 		if ((rv != 0) || MII_INVALIDID(id2))
   17903 			continue;
   17904 		break;
   17905 	}
   17906 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17907 		goto out;
   17908 
   17909 	/*
   17910 	 * In case the PHY needs to be in mdio slow mode,
   17911 	 * set slow mode and try to get the PHY id again.
   17912 	 */
   17913 	rv = 0;
   17914 	if (sc->sc_type < WM_T_PCH_LPT) {
   17915 		wm_set_mdio_slow_mode_hv_locked(sc);
   17916 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17917 		    &id1);
   17918 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17919 		    &id2);
   17920 	}
   17921 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17922 		device_printf(sc->sc_dev, "XXX return with false\n");
   17923 		return false;
   17924 	}
   17925 out:
   17926 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17927 		/* Only unforce SMBus if ME is not active */
   17928 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17929 			uint16_t phyreg;
   17930 
   17931 			/* Unforce SMBus mode in PHY */
   17932 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17933 			    CV_SMB_CTRL, &phyreg);
   17934 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17935 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17936 			    CV_SMB_CTRL, phyreg);
   17937 
   17938 			/* Unforce SMBus mode in MAC */
   17939 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17940 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17941 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17942 		}
   17943 	}
   17944 	return true;
   17945 }
   17946 
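/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggle the LANPHYPC pin value fully on and off, with the PHY config
 *  counter set to 50msec beforehand.  This power-cycles the PHY so that
 *  an inaccessible PHY can recover.
 */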
   17947 static void
   17948 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17949 {
   17950 	uint32_t reg;
   17951 	int i;
   17952 
   17953 	/* Set PHY Config Counter to 50msec */
   17954 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17955 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17956 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17957 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17958 
   17959 	/* Toggle LANPHYPC */
   17960 	reg = CSR_READ(sc, WMREG_CTRL);
   17961 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17962 	reg &= ~CTRL_LANPHYPC_VALUE;
   17963 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17964 	CSR_WRITE_FLUSH(sc);
   17965 	delay(1000);
   17966 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17967 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17968 	CSR_WRITE_FLUSH(sc);
   17969 
   17970 	if (sc->sc_type < WM_T_PCH_LPT)
   17971 		delay(50 * 1000);
   17972 	else {
   17973 		i = 20;
   17974 
   17975 		do {
   17976 			delay(5 * 1000);
   17977 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17978 		    && i--);
   17979 
   17980 		delay(30 * 1000);
   17981 	}
   17982 }
   17983 
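/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  Compute the LTR (Latency Tolerance Reporting) values from the Rx
 *  packet buffer size and the link speed, bound them by what the
 *  platform advertises, and program the LTR and OBFF registers
 *  accordingly.
 */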
   17984 static int
   17985 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17986 {
   17987 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17988 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17989 	uint32_t rxa;
   17990 	uint16_t scale = 0, lat_enc = 0;
   17991 	int32_t obff_hwm = 0;
   17992 	int64_t lat_ns, value;
   17993 
   17994 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17995 		device_xname(sc->sc_dev), __func__));
   17996 
   17997 	if (link) {
   17998 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17999 		uint32_t status;
   18000 		uint16_t speed;
   18001 		pcireg_t preg;
   18002 
   18003 		status = CSR_READ(sc, WMREG_STATUS);
   18004 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   18005 		case STATUS_SPEED_10:
   18006 			speed = 10;
   18007 			break;
   18008 		case STATUS_SPEED_100:
   18009 			speed = 100;
   18010 			break;
   18011 		case STATUS_SPEED_1000:
   18012 			speed = 1000;
   18013 			break;
   18014 		default:
   18015 			device_printf(sc->sc_dev, "Unknown speed "
   18016 			    "(status = %08x)\n", status);
   18017 			return -1;
   18018 		}
   18019 
   18020 		/* Rx Packet Buffer Allocation size (KB) */
   18021 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   18022 
   18023 		/*
   18024 		 * Determine the maximum latency tolerated by the device.
   18025 		 *
   18026 		 * Per the PCIe spec, the tolerated latencies are encoded as
   18027 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   18028 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   18029 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   18030 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   18031 		 */
   18032 		lat_ns = ((int64_t)rxa * 1024 -
   18033 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   18034 			+ ETHER_HDR_LEN))) * 8 * 1000;
   18035 		if (lat_ns < 0)
   18036 			lat_ns = 0;
   18037 		else
   18038 			lat_ns /= speed;
   18039 		value = lat_ns;
   18040 
   18041 		while (value > LTRV_VALUE) {
    18042 			scale++;
   18043 			value = howmany(value, __BIT(5));
   18044 		}
   18045 		if (scale > LTRV_SCALE_MAX) {
   18046 			device_printf(sc->sc_dev,
   18047 			    "Invalid LTR latency scale %d\n", scale);
   18048 			return -1;
   18049 		}
   18050 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
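		/*
		 * Illustrative example (not from the hardware spec): for
		 * lat_ns = 150000, two divisions by 2^5 are needed
		 * (150000 -> 4688 -> 147), so scale = 2 and, per the PCIe
		 * LTR format (value in bits 9:0, scale in bits 12:10),
		 * lat_enc = (2 << 10) | 147, which decodes back to
		 * 147 * 2^10 ns, i.e. about 150.5us.
		 */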
   18051 
   18052 		/* Determine the maximum latency tolerated by the platform */
   18053 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18054 		    WM_PCI_LTR_CAP_LPT);
   18055 		max_snoop = preg & 0xffff;
   18056 		max_nosnoop = preg >> 16;
   18057 
   18058 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   18059 
   18060 		if (lat_enc > max_ltr_enc) {
   18061 			lat_enc = max_ltr_enc;
   18062 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   18063 			    * PCI_LTR_SCALETONS(
   18064 				    __SHIFTOUT(lat_enc,
   18065 					PCI_LTR_MAXSNOOPLAT_SCALE));
   18066 		}
   18067 
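		/*
		 * Convert the tolerated latency back into the amount of Rx
		 * buffer (in KB) that would fill at the current link speed
		 * within that time; the OBFF high water mark is whatever
		 * remains of the Rx buffer.
		 */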
   18068 		if (lat_ns) {
   18069 			lat_ns *= speed * 1000;
   18070 			lat_ns /= 8;
   18071 			lat_ns /= 1000000000;
   18072 			obff_hwm = (int32_t)(rxa - lat_ns);
   18073 		}
   18074 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    18075 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    18076 			    " (rxa = %d, lat_ns = %d)\n",
   18077 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   18078 			return -1;
   18079 		}
   18080 	}
    18081 	/* Set the Snoop and No-Snoop latencies to the same value */
   18082 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   18083 	CSR_WRITE(sc, WMREG_LTRV, reg);
   18084 
   18085 	/* Set OBFF high water mark */
   18086 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   18087 	reg |= obff_hwm;
   18088 	CSR_WRITE(sc, WMREG_SVT, reg);
   18089 
   18090 	/* Enable OBFF */
   18091 	reg = CSR_READ(sc, WMREG_SVCR);
   18092 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   18093 	CSR_WRITE(sc, WMREG_SVCR, reg);
   18094 
   18095 	return 0;
   18096 }
   18097 
   18098 /*
   18099  * I210 Errata 25 and I211 Errata 10
   18100  * Slow System Clock.
   18101  *
    18102  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   18103  */
   18104 static int
   18105 wm_pll_workaround_i210(struct wm_softc *sc)
   18106 {
   18107 	uint32_t mdicnfg, wuc;
   18108 	uint32_t reg;
   18109 	pcireg_t pcireg;
   18110 	uint32_t pmreg;
   18111 	uint16_t nvmword, tmp_nvmword;
   18112 	uint16_t phyval;
   18113 	bool wa_done = false;
   18114 	int i, rv = 0;
   18115 
   18116 	/* Get Power Management cap offset */
   18117 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   18118 	    &pmreg, NULL) == 0)
   18119 		return -1;
   18120 
   18121 	/* Save WUC and MDICNFG registers */
   18122 	wuc = CSR_READ(sc, WMREG_WUC);
   18123 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   18124 
   18125 	reg = mdicnfg & ~MDICNFG_DEST;
   18126 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   18127 
   18128 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   18129 		/*
   18130 		 * The default value of the Initialization Control Word 1
   18131 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   18132 		 */
   18133 		nvmword = INVM_DEFAULT_AL;
   18134 	}
   18135 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   18136 
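	/*
	 * Check whether the PLL has locked.  If the PHY's PLL frequency
	 * register still reads as unconfigured, reset the internal PHY and
	 * bounce the device through D3/D0 with the modified autoload word,
	 * then retry up to WM_MAX_PLL_TRIES times.
	 */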
   18137 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   18138 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   18139 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   18140 
   18141 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   18142 			rv = 0;
   18143 			break; /* OK */
   18144 		} else
   18145 			rv = -1;
   18146 
   18147 		wa_done = true;
   18148 		/* Directly reset the internal PHY */
   18149 		reg = CSR_READ(sc, WMREG_CTRL);
   18150 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   18151 
   18152 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   18153 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   18154 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   18155 
   18156 		CSR_WRITE(sc, WMREG_WUC, 0);
   18157 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   18158 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18159 
   18160 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18161 		    pmreg + PCI_PMCSR);
   18162 		pcireg |= PCI_PMCSR_STATE_D3;
   18163 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18164 		    pmreg + PCI_PMCSR, pcireg);
   18165 		delay(1000);
   18166 		pcireg &= ~PCI_PMCSR_STATE_D3;
   18167 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18168 		    pmreg + PCI_PMCSR, pcireg);
   18169 
   18170 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18171 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18172 
   18173 		/* Restore WUC register */
   18174 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18175 	}
   18176 
   18177 	/* Restore MDICNFG setting */
   18178 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18179 	if (wa_done)
   18180 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18181 	return rv;
   18182 }
   18183 
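/*
 *  wm_legacy_irq_quirk_spt - Legacy (INTx) interrupt quirk
 *  @sc: pointer to the HW structure
 *
 *  On PCH_SPT, PCH_CNP and PCH_TGP, ungate the side clock and disable
 *  IOSF SB clock gating and clock requests, as a workaround for the
 *  delivery of legacy interrupts.
 */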
   18184 static void
   18185 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18186 {
   18187 	uint32_t reg;
   18188 
   18189 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18190 		device_xname(sc->sc_dev), __func__));
   18191 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18192 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP));
   18193 
   18194 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18195 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18196 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18197 
   18198 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18199 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18200 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18201 }
   18202 
   18203 /* Sysctl functions */
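/*
 * The following two handlers report the current value of a Tx queue's
 * descriptor head (TDH) and tail (TDT) registers, respectively.
 */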
   18204 static int
   18205 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18206 {
   18207 	struct sysctlnode node = *rnode;
   18208 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18209 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18210 	struct wm_softc *sc = txq->txq_sc;
   18211 	uint32_t reg;
   18212 
   18213 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18214 	node.sysctl_data = &reg;
   18215 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18216 }
   18217 
   18218 static int
   18219 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18220 {
   18221 	struct sysctlnode node = *rnode;
   18222 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18223 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18224 	struct wm_softc *sc = txq->txq_sc;
   18225 	uint32_t reg;
   18226 
   18227 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18228 	node.sysctl_data = &reg;
   18229 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18230 }
   18231 
   18232 #ifdef WM_DEBUG
   18233 static int
   18234 wm_sysctl_debug(SYSCTLFN_ARGS)
   18235 {
   18236 	struct sysctlnode node = *rnode;
   18237 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18238 	uint32_t dflags;
   18239 	int error;
   18240 
   18241 	dflags = sc->sc_debug;
   18242 	node.sysctl_data = &dflags;
   18243 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18244 
   18245 	if (error || newp == NULL)
   18246 		return error;
   18247 
   18248 	sc->sc_debug = dflags;
   18249 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18250 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18251 
   18252 	return 0;
   18253 }
   18254 #endif
   18255