/*	$NetBSD: if_wm.c,v 1.715 2021/10/20 08:10:26 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Tx multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.715 2021/10/20 08:10:26 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 #include <sys/atomic.h>
    111 
    112 #include <sys/rndsource.h>
    113 
    114 #include <net/if.h>
    115 #include <net/if_dl.h>
    116 #include <net/if_media.h>
    117 #include <net/if_ether.h>
    118 
    119 #include <net/bpf.h>
    120 
    121 #include <net/rss_config.h>
    122 
    123 #include <netinet/in.h>			/* XXX for struct ip */
    124 #include <netinet/in_systm.h>		/* XXX for struct ip */
    125 #include <netinet/ip.h>			/* XXX for struct ip */
    126 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    127 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    128 
    129 #include <sys/bus.h>
    130 #include <sys/intr.h>
    131 #include <machine/endian.h>
    132 
    133 #include <dev/mii/mii.h>
    134 #include <dev/mii/mdio.h>
    135 #include <dev/mii/miivar.h>
    136 #include <dev/mii/miidevs.h>
    137 #include <dev/mii/mii_bitbang.h>
    138 #include <dev/mii/ikphyreg.h>
    139 #include <dev/mii/igphyreg.h>
    140 #include <dev/mii/igphyvar.h>
    141 #include <dev/mii/inbmphyreg.h>
    142 #include <dev/mii/ihphyreg.h>
    143 #include <dev/mii/makphyreg.h>
    144 
    145 #include <dev/pci/pcireg.h>
    146 #include <dev/pci/pcivar.h>
    147 #include <dev/pci/pcidevs.h>
    148 
    149 #include <dev/pci/if_wmreg.h>
    150 #include <dev/pci/if_wmvar.h>
    151 
    152 #ifdef WM_DEBUG
    153 #define	WM_DEBUG_LINK		__BIT(0)
    154 #define	WM_DEBUG_TX		__BIT(1)
    155 #define	WM_DEBUG_RX		__BIT(2)
    156 #define	WM_DEBUG_GMII		__BIT(3)
    157 #define	WM_DEBUG_MANAGE		__BIT(4)
    158 #define	WM_DEBUG_NVM		__BIT(5)
    159 #define	WM_DEBUG_INIT		__BIT(6)
    160 #define	WM_DEBUG_LOCK		__BIT(7)
    161 
    162 #if 0
    163 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    164 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    165 	WM_DEBUG_LOCK
    166 #endif
    167 
    168 #define	DPRINTF(sc, x, y)			  \
    169 	do {					  \
    170 		if ((sc)->sc_debug & (x))	  \
    171 			printf y;		  \
    172 	} while (0)
    173 #else
    174 #define	DPRINTF(sc, x, y)	__nothing
    175 #endif /* WM_DEBUG */
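
/*
 * Illustrative DPRINTF() usage (a hypothetical call, not taken from this
 * file): the third argument is double-parenthesized because it is passed
 * verbatim to printf as a complete argument list.
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */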

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts supported by this driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

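/*
 * Example (hypothetical values): because the ring sizes are powers of
 * two, the index arithmetic above wraps with a mask instead of a modulo.
 * With WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 100)  == 101	(101 & 4095)
 *	WM_NEXTTX(txq, 4095) == 0	(4096 & 4095)
 */
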
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  A full-sized jumbo packet consumes 5 Rx buffers.  We
 * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
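/*
 * Illustrative arithmetic (assuming a 9018-byte jumbo frame and 2k
 * MCLBYTES buffers): 9018 / 2048 rounds up to 5 buffers per jumbo
 * packet, and 256 descriptors / 5 leaves room for roughly 50 of them.
 */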
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
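
/*
 * Illustrative expansion (assuming queue number 0 and an event named
 * "txdw" on a struct wm_txqueue): WM_Q_EVCNT_DEFINE(txq, txdw) declares
 * txq_txdw_evcnt_name[] and txq_ev_txdw, and
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname) registers the
 * counter under the name "txq00txdw" via evcnt_attach_dynamic(9).
 */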

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
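
/*
 * Illustrative (hypothetical) usage: sc_core_lock may still be NULL
 * early in attach, and the macros above deliberately tolerate that.
 *
 *	WM_CORE_LOCK(sc);
 *	KASSERT(WM_CORE_LOCKED(sc));
 *	sc->sc_if_flags = ifp->if_flags;
 *	WM_CORE_UNLOCK(sc);
 */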

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
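
/*
 * Illustrative (hypothetical) sequence: the tail pointer always points
 * at the next m_next slot to fill, so appends are O(1) and the chain
 * stays linked through m_next:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head chain is now empty
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */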

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
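
/*
 * Illustrative expansion: WM_Q_EVCNT_INCR(txq, txdw) becomes
 * WM_EVCNT_INCR(&(txq)->txq_ev_txdw), i.e. a relaxed atomic (or, without
 * __HAVE_ATOMIC64_LOADSTORE, plain) increment of that event counter.
 */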

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
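
/*
 * Example of the usual posted-write flush idiom (a sketch, not a call
 * site from this file): STATUS is read only for its side effect of
 * forcing the preceding write out to the device.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */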

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

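/*
 * Example (hypothetical address): for a 64-bit bus_addr_t descriptor
 * base of 0x000000012345f000, WM_CDTXADDR_LO() yields 0x2345f000 and
 * WM_CDTXADDR_HI() yields 0x1, matching the lo/hi halves of the
 * descriptor base address registers; with a 32-bit bus_addr_t the high
 * half is always 0.
 */
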
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Generally, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM (2) Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM (3) Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM (4) Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM (5) Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM (6) Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM (7) Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM (8) Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM (9) Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM (10) Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM (11) Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM (12) Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM (13) Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM (14) Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM (15) Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1644 	  "I219 LM (16) Ethernet Connection",
   1645 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1647 	  "I219 LM (17) Ethernet Connection",
   1648 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1650 	  "I219 LM (18) Ethernet Connection",
   1651 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1653 	  "I219 LM (19) Ethernet Connection",
   1654 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1659 	  "I219 V (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1662 	  "I219 V (4) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1665 	  "I219 V (5) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1668 	  "I219 V (6) Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1671 	  "I219 V (7) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1674 	  "I219 V (8) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1677 	  "I219 V (9) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1680 	  "I219 V (10) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1683 	  "I219 V (11) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1686 	  "I219 V (12) Ethernet Connection",
   1687 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1689 	  "I219 V (13) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1692 	  "I219 V (14) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1695 	  "I219 V (15) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1698 	  "I219 V (16) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1701 	  "I219 V (17) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1704 	  "I219 V (18) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1707 	  "I219 V (19) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ 0,			0,
   1710 	  NULL,
   1711 	  0,			0 },
   1712 };
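         /*
          * The NULL-name sentinel above terminates the table; wm_lookup()
          * stops scanning when it reaches it.
          */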
   1713 
   1714 /*
   1715  * Register read/write functions.
   1716  * Other than CSR_{READ|WRITE}().
   1717  */
   1718 
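         /*
          * wm_io_read() and wm_io_write() below use the chip's I/O-mapped
          * indirect register window: the target register offset is first
          * written at offset 0 of the I/O BAR, and the data is then read
          * or written at offset 4.
          */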
   1719 #if 0 /* Not currently used */
   1720 static inline uint32_t
   1721 wm_io_read(struct wm_softc *sc, int reg)
   1722 {
   1723 
   1724 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1725 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1726 }
   1727 #endif
   1728 
   1729 static inline void
   1730 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1731 {
   1732 
   1733 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1734 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1735 }
   1736 
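         /*
          * Write an 8-bit register that sits behind an indirection
          * register such as SCTL: place the data in the low bits and the
          * register offset in the address field, then poll (5us per
          * iteration, up to SCTL_CTL_POLL_TIMEOUT tries) for READY.
          */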
   1737 static inline void
   1738 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1739     uint32_t data)
   1740 {
   1741 	uint32_t regval;
   1742 	int i;
   1743 
   1744 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1745 
   1746 	CSR_WRITE(sc, reg, regval);
   1747 
   1748 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1749 		delay(5);
   1750 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1751 			break;
   1752 	}
   1753 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1754 		aprint_error("%s: WARNING:"
   1755 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1756 		    device_xname(sc->sc_dev), reg);
   1757 	}
   1758 }
   1759 
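         /*
          * Store a bus address into the two little-endian halves of a
          * wiseman address; e.g. v = 0x123456789 yields wa_low 0x23456789
          * and wa_high 0x1. With a 32-bit bus_addr_t the high half is 0.
          */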
   1760 static inline void
   1761 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1762 {
   1763 	wa->wa_low = htole32(v & 0xffffffffU);
   1764 	if (sizeof(bus_addr_t) == 8)
   1765 		wa->wa_high = htole32((uint64_t) v >> 32);
   1766 	else
   1767 		wa->wa_high = 0;
   1768 }
   1769 
   1770 /*
   1771  * Descriptor sync/init functions.
   1772  */
   1773 static inline void
   1774 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1775 {
   1776 	struct wm_softc *sc = txq->txq_sc;
   1777 
   1778 	/* If it will wrap around, sync to the end of the ring. */
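         	/*
          	 * E.g. with a 256-descriptor ring, start = 254 and num = 4
          	 * syncs descriptors 254-255 here and descriptors 0-1 below.
          	 */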
   1779 	if ((start + num) > WM_NTXDESC(txq)) {
   1780 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1781 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1782 		    (WM_NTXDESC(txq) - start), ops);
   1783 		num -= (WM_NTXDESC(txq) - start);
   1784 		start = 0;
   1785 	}
   1786 
   1787 	/* Now sync whatever is left. */
   1788 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1789 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1790 }
   1791 
   1792 static inline void
   1793 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1794 {
   1795 	struct wm_softc *sc = rxq->rxq_sc;
   1796 
   1797 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1798 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1799 }
   1800 
   1801 static inline void
   1802 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1803 {
   1804 	struct wm_softc *sc = rxq->rxq_sc;
   1805 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1806 	struct mbuf *m = rxs->rxs_mbuf;
   1807 
   1808 	/*
   1809 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1810 	 * so that the payload after the Ethernet header is aligned
   1811 	 * to a 4-byte boundary.
    1812 	 *
   1813 	 * XXX BRAINDAMAGE ALERT!
   1814 	 * The stupid chip uses the same size for every buffer, which
   1815 	 * is set in the Receive Control register.  We are using the 2K
   1816 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1817 	 * reason, we can't "scoot" packets longer than the standard
   1818 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1819 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1820 	 * the upper layer copy the headers.
   1821 	 */
   1822 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1823 
   1824 	if (sc->sc_type == WM_T_82574) {
   1825 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1826 		rxd->erx_data.erxd_addr =
   1827 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1828 		rxd->erx_data.erxd_dd = 0;
   1829 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1830 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1831 
   1832 		rxd->nqrx_data.nrxd_paddr =
   1833 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1834 		/* Currently, split header is not supported. */
   1835 		rxd->nqrx_data.nrxd_haddr = 0;
   1836 	} else {
   1837 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1838 
   1839 		wm_set_dma_addr(&rxd->wrx_addr,
   1840 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1841 		rxd->wrx_len = 0;
   1842 		rxd->wrx_cksum = 0;
   1843 		rxd->wrx_status = 0;
   1844 		rxd->wrx_errors = 0;
   1845 		rxd->wrx_special = 0;
   1846 	}
   1847 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1848 
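         	/*
          	 * Advancing the receive descriptor tail (RDT) hands the
          	 * freshly initialized descriptor back to the hardware.
          	 */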
   1849 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1850 }
   1851 
   1852 /*
   1853  * Device driver interface functions and commonly used functions.
   1854  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1855  */
   1856 
   1857 /* Lookup supported device table */
   1858 static const struct wm_product *
   1859 wm_lookup(const struct pci_attach_args *pa)
   1860 {
   1861 	const struct wm_product *wmp;
   1862 
   1863 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1864 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1865 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1866 			return wmp;
   1867 	}
   1868 	return NULL;
   1869 }
   1870 
   1871 /* The match function (ca_match) */
   1872 static int
   1873 wm_match(device_t parent, cfdata_t cf, void *aux)
   1874 {
   1875 	struct pci_attach_args *pa = aux;
   1876 
   1877 	if (wm_lookup(pa) != NULL)
   1878 		return 1;
   1879 
   1880 	return 0;
   1881 }
   1882 
   1883 /* The attach function (ca_attach) */
   1884 static void
   1885 wm_attach(device_t parent, device_t self, void *aux)
   1886 {
   1887 	struct wm_softc *sc = device_private(self);
   1888 	struct pci_attach_args *pa = aux;
   1889 	prop_dictionary_t dict;
   1890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1891 	pci_chipset_tag_t pc = pa->pa_pc;
   1892 	int counts[PCI_INTR_TYPE_SIZE];
   1893 	pci_intr_type_t max_type;
   1894 	const char *eetype, *xname;
   1895 	bus_space_tag_t memt;
   1896 	bus_space_handle_t memh;
   1897 	bus_size_t memsize;
   1898 	int memh_valid;
   1899 	int i, error;
   1900 	const struct wm_product *wmp;
   1901 	prop_data_t ea;
   1902 	prop_number_t pn;
   1903 	uint8_t enaddr[ETHER_ADDR_LEN];
   1904 	char buf[256];
   1905 	char wqname[MAXCOMLEN];
   1906 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1907 	pcireg_t preg, memtype;
   1908 	uint16_t eeprom_data, apme_mask;
   1909 	bool force_clear_smbi;
   1910 	uint32_t link_mode;
   1911 	uint32_t reg;
   1912 
   1913 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1914 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1915 #endif
   1916 	sc->sc_dev = self;
   1917 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1918 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1919 	sc->sc_core_stopping = false;
   1920 
   1921 	wmp = wm_lookup(pa);
   1922 #ifdef DIAGNOSTIC
   1923 	if (wmp == NULL) {
   1924 		printf("\n");
   1925 		panic("wm_attach: impossible");
   1926 	}
   1927 #endif
   1928 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1929 
   1930 	sc->sc_pc = pa->pa_pc;
   1931 	sc->sc_pcitag = pa->pa_tag;
   1932 
   1933 	if (pci_dma64_available(pa))
   1934 		sc->sc_dmat = pa->pa_dmat64;
   1935 	else
   1936 		sc->sc_dmat = pa->pa_dmat;
   1937 
   1938 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1939 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1940 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1941 
   1942 	sc->sc_type = wmp->wmp_type;
   1943 
   1944 	/* Set default function pointers */
   1945 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1946 	sc->phy.release = sc->nvm.release = wm_put_null;
   1947 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1948 
   1949 	if (sc->sc_type < WM_T_82543) {
   1950 		if (sc->sc_rev < 2) {
   1951 			aprint_error_dev(sc->sc_dev,
   1952 			    "i82542 must be at least rev. 2\n");
   1953 			return;
   1954 		}
   1955 		if (sc->sc_rev < 3)
   1956 			sc->sc_type = WM_T_82542_2_0;
   1957 	}
   1958 
   1959 	/*
   1960 	 * Disable MSI for Errata:
   1961 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1962 	 *
   1963 	 *  82544: Errata 25
   1964 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1965 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1966 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1967 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1968 	 *
   1969 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1970 	 *
   1971 	 *  82571 & 82572: Errata 63
   1972 	 */
   1973 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1974 	    || (sc->sc_type == WM_T_82572))
   1975 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1976 
   1977 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1978 	    || (sc->sc_type == WM_T_82580)
   1979 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1980 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1981 		sc->sc_flags |= WM_F_NEWQUEUE;
   1982 
   1983 	/* Set device properties (mactype) */
   1984 	dict = device_properties(sc->sc_dev);
   1985 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1986 
   1987 	/*
    1988 	 * Map the device.  All devices support memory-mapped access,
   1989 	 * and it is really required for normal operation.
   1990 	 */
   1991 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1992 	switch (memtype) {
   1993 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1994 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1995 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1996 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1997 		break;
   1998 	default:
   1999 		memh_valid = 0;
   2000 		break;
   2001 	}
   2002 
   2003 	if (memh_valid) {
   2004 		sc->sc_st = memt;
   2005 		sc->sc_sh = memh;
   2006 		sc->sc_ss = memsize;
   2007 	} else {
   2008 		aprint_error_dev(sc->sc_dev,
   2009 		    "unable to map device registers\n");
   2010 		return;
   2011 	}
   2012 
   2013 	/*
   2014 	 * In addition, i82544 and later support I/O mapped indirect
   2015 	 * register access.  It is not desirable (nor supported in
   2016 	 * this driver) to use it for normal operation, though it is
   2017 	 * required to work around bugs in some chip versions.
   2018 	 */
   2019 	switch (sc->sc_type) {
   2020 	case WM_T_82544:
   2021 	case WM_T_82541:
   2022 	case WM_T_82541_2:
   2023 	case WM_T_82547:
   2024 	case WM_T_82547_2:
   2025 		/* First we have to find the I/O BAR. */
   2026 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2027 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2028 			if (memtype == PCI_MAPREG_TYPE_IO)
   2029 				break;
   2030 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2031 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2032 				i += 4;	/* skip high bits, too */
   2033 		}
   2034 		if (i < PCI_MAPREG_END) {
   2035 			/*
    2036 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2037 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2038 			 * That's no problem, because the newer chips don't
    2039 			 * have this bug.
    2040 			 *
    2041 			 * The i8254x apparently doesn't respond when the
    2042 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2043 			 * been configured.
   2044 			 */
   2045 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2046 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2047 				aprint_error_dev(sc->sc_dev,
   2048 				    "WARNING: I/O BAR at zero.\n");
   2049 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2050 					0, &sc->sc_iot, &sc->sc_ioh,
   2051 					NULL, &sc->sc_ios) == 0) {
   2052 				sc->sc_flags |= WM_F_IOH_VALID;
   2053 			} else
   2054 				aprint_error_dev(sc->sc_dev,
   2055 				    "WARNING: unable to map I/O space\n");
   2056 		}
   2057 		break;
   2058 	default:
   2059 		break;
   2060 	}
   2061 
   2062 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2063 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2064 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2065 	if (sc->sc_type < WM_T_82542_2_1)
   2066 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2067 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2068 
   2069 	/* Power up chip */
   2070 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2071 	    && error != EOPNOTSUPP) {
   2072 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2073 		return;
   2074 	}
   2075 
   2076 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2077 	/*
    2078 	 * To save interrupt resources, don't use MSI-X if we can
    2079 	 * use only one queue.
   2080 	 */
   2081 	if (sc->sc_nqueues > 1) {
   2082 		max_type = PCI_INTR_TYPE_MSIX;
   2083 		/*
    2084 		 * The 82583 has an MSI-X capability in the PCI configuration
    2085 		 * space but doesn't actually support it. At least the
    2086 		 * documentation doesn't say anything about MSI-X.
   2087 		 */
   2088 		counts[PCI_INTR_TYPE_MSIX]
   2089 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2090 	} else {
   2091 		max_type = PCI_INTR_TYPE_MSI;
   2092 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2093 	}
   2094 
   2095 	/* Allocation settings */
   2096 	counts[PCI_INTR_TYPE_MSI] = 1;
   2097 	counts[PCI_INTR_TYPE_INTX] = 1;
   2098 	/* overridden by disable flags */
   2099 	if (wm_disable_msi != 0) {
   2100 		counts[PCI_INTR_TYPE_MSI] = 0;
   2101 		if (wm_disable_msix != 0) {
   2102 			max_type = PCI_INTR_TYPE_INTX;
   2103 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2104 		}
   2105 	} else if (wm_disable_msix != 0) {
   2106 		max_type = PCI_INTR_TYPE_MSI;
   2107 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2108 	}
   2109 
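         	/*
          	 * Interrupt allocation falls back step by step: if MSI-X
          	 * setup fails, retry with MSI; if MSI setup fails, retry
          	 * with INTx, releasing the previous allocation each time.
          	 */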
   2110 alloc_retry:
   2111 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2112 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2113 		return;
   2114 	}
   2115 
   2116 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2117 		error = wm_setup_msix(sc);
   2118 		if (error) {
   2119 			pci_intr_release(pc, sc->sc_intrs,
   2120 			    counts[PCI_INTR_TYPE_MSIX]);
   2121 
   2122 			/* Setup for MSI: Disable MSI-X */
   2123 			max_type = PCI_INTR_TYPE_MSI;
   2124 			counts[PCI_INTR_TYPE_MSI] = 1;
   2125 			counts[PCI_INTR_TYPE_INTX] = 1;
   2126 			goto alloc_retry;
   2127 		}
   2128 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2129 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2130 		error = wm_setup_legacy(sc);
   2131 		if (error) {
   2132 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2133 			    counts[PCI_INTR_TYPE_MSI]);
   2134 
   2135 			/* The next try is for INTx: Disable MSI */
   2136 			max_type = PCI_INTR_TYPE_INTX;
   2137 			counts[PCI_INTR_TYPE_INTX] = 1;
   2138 			goto alloc_retry;
   2139 		}
   2140 	} else {
   2141 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2142 		error = wm_setup_legacy(sc);
   2143 		if (error) {
   2144 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2145 			    counts[PCI_INTR_TYPE_INTX]);
   2146 			return;
   2147 		}
   2148 	}
   2149 
   2150 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2151 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2152 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2153 	    WM_WORKQUEUE_FLAGS);
   2154 	if (error) {
   2155 		aprint_error_dev(sc->sc_dev,
   2156 		    "unable to create workqueue\n");
   2157 		goto out;
   2158 	}
   2159 
   2160 	/*
   2161 	 * Check the function ID (unit number of the chip).
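         	 * On multi-port chips, the FUNCID field of the STATUS
         	 * register tells us which LAN port (function) this instance
         	 * is; sc_funcid is later used to select per-port NVM words
         	 * such as NVM_OFF_CFG3_PORTA/PORTB.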
   2162 	 */
   2163 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2164 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2165 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2166 	    || (sc->sc_type == WM_T_82580)
   2167 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2168 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2169 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2170 	else
   2171 		sc->sc_funcid = 0;
   2172 
   2173 	/*
   2174 	 * Determine a few things about the bus we're connected to.
   2175 	 */
   2176 	if (sc->sc_type < WM_T_82543) {
   2177 		/* We don't really know the bus characteristics here. */
   2178 		sc->sc_bus_speed = 33;
   2179 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2180 		/*
    2181 		 * CSA (Communication Streaming Architecture) is about as fast
    2182 		 * as a 32-bit 66MHz PCI bus.
   2183 		 */
   2184 		sc->sc_flags |= WM_F_CSA;
   2185 		sc->sc_bus_speed = 66;
   2186 		aprint_verbose_dev(sc->sc_dev,
   2187 		    "Communication Streaming Architecture\n");
   2188 		if (sc->sc_type == WM_T_82547) {
   2189 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2190 			callout_setfunc(&sc->sc_txfifo_ch,
   2191 			    wm_82547_txfifo_stall, sc);
   2192 			aprint_verbose_dev(sc->sc_dev,
   2193 			    "using 82547 Tx FIFO stall work-around\n");
   2194 		}
   2195 	} else if (sc->sc_type >= WM_T_82571) {
   2196 		sc->sc_flags |= WM_F_PCIE;
   2197 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2198 		    && (sc->sc_type != WM_T_ICH10)
   2199 		    && (sc->sc_type != WM_T_PCH)
   2200 		    && (sc->sc_type != WM_T_PCH2)
   2201 		    && (sc->sc_type != WM_T_PCH_LPT)
   2202 		    && (sc->sc_type != WM_T_PCH_SPT)
   2203 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2204 			/* ICH* and PCH* have no PCIe capability registers */
   2205 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2206 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2207 				NULL) == 0)
   2208 				aprint_error_dev(sc->sc_dev,
   2209 				    "unable to find PCIe capability\n");
   2210 		}
   2211 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2212 	} else {
   2213 		reg = CSR_READ(sc, WMREG_STATUS);
   2214 		if (reg & STATUS_BUS64)
   2215 			sc->sc_flags |= WM_F_BUS64;
   2216 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2217 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2218 
   2219 			sc->sc_flags |= WM_F_PCIX;
   2220 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2221 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2222 				aprint_error_dev(sc->sc_dev,
   2223 				    "unable to find PCIX capability\n");
   2224 			else if (sc->sc_type != WM_T_82545_3 &&
   2225 				 sc->sc_type != WM_T_82546_3) {
   2226 				/*
   2227 				 * Work around a problem caused by the BIOS
   2228 				 * setting the max memory read byte count
   2229 				 * incorrectly.
   2230 				 */
   2231 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2232 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2233 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2234 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2235 
   2236 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2237 				    PCIX_CMD_BYTECNT_SHIFT;
   2238 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2239 				    PCIX_STATUS_MAXB_SHIFT;
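         				/*
          				 * Both fields encode a burst
          				 * size of 512 << n bytes; clamp
          				 * the commanded MMRBC down to
          				 * the device's maximum.
          				 */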
   2240 				if (bytecnt > maxb) {
   2241 					aprint_verbose_dev(sc->sc_dev,
   2242 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2243 					    512 << bytecnt, 512 << maxb);
   2244 					pcix_cmd = (pcix_cmd &
   2245 					    ~PCIX_CMD_BYTECNT_MASK) |
   2246 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2247 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2248 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2249 					    pcix_cmd);
   2250 				}
   2251 			}
   2252 		}
   2253 		/*
   2254 		 * The quad port adapter is special; it has a PCIX-PCIX
   2255 		 * bridge on the board, and can run the secondary bus at
   2256 		 * a higher speed.
   2257 		 */
   2258 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2259 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2260 								      : 66;
   2261 		} else if (sc->sc_flags & WM_F_PCIX) {
   2262 			switch (reg & STATUS_PCIXSPD_MASK) {
   2263 			case STATUS_PCIXSPD_50_66:
   2264 				sc->sc_bus_speed = 66;
   2265 				break;
   2266 			case STATUS_PCIXSPD_66_100:
   2267 				sc->sc_bus_speed = 100;
   2268 				break;
   2269 			case STATUS_PCIXSPD_100_133:
   2270 				sc->sc_bus_speed = 133;
   2271 				break;
   2272 			default:
   2273 				aprint_error_dev(sc->sc_dev,
   2274 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2275 				    reg & STATUS_PCIXSPD_MASK);
   2276 				sc->sc_bus_speed = 66;
   2277 				break;
   2278 			}
   2279 		} else
   2280 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2281 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2282 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2283 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2284 	}
   2285 
   2286 	/* clear interesting stat counters */
   2287 	CSR_READ(sc, WMREG_COLC);
   2288 	CSR_READ(sc, WMREG_RXERRC);
   2289 
   2290 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2291 	    || (sc->sc_type >= WM_T_ICH8))
   2292 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2293 	if (sc->sc_type >= WM_T_ICH8)
   2294 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2295 
   2296 	/* Set PHY, NVM mutex related stuff */
   2297 	switch (sc->sc_type) {
   2298 	case WM_T_82542_2_0:
   2299 	case WM_T_82542_2_1:
   2300 	case WM_T_82543:
   2301 	case WM_T_82544:
   2302 		/* Microwire */
   2303 		sc->nvm.read = wm_nvm_read_uwire;
   2304 		sc->sc_nvm_wordsize = 64;
   2305 		sc->sc_nvm_addrbits = 6;
   2306 		break;
   2307 	case WM_T_82540:
   2308 	case WM_T_82545:
   2309 	case WM_T_82545_3:
   2310 	case WM_T_82546:
   2311 	case WM_T_82546_3:
   2312 		/* Microwire */
   2313 		sc->nvm.read = wm_nvm_read_uwire;
   2314 		reg = CSR_READ(sc, WMREG_EECD);
   2315 		if (reg & EECD_EE_SIZE) {
   2316 			sc->sc_nvm_wordsize = 256;
   2317 			sc->sc_nvm_addrbits = 8;
   2318 		} else {
   2319 			sc->sc_nvm_wordsize = 64;
   2320 			sc->sc_nvm_addrbits = 6;
   2321 		}
   2322 		sc->sc_flags |= WM_F_LOCK_EECD;
   2323 		sc->nvm.acquire = wm_get_eecd;
   2324 		sc->nvm.release = wm_put_eecd;
   2325 		break;
   2326 	case WM_T_82541:
   2327 	case WM_T_82541_2:
   2328 	case WM_T_82547:
   2329 	case WM_T_82547_2:
   2330 		reg = CSR_READ(sc, WMREG_EECD);
   2331 		/*
    2332 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2333 		 * 8254[17], so set the flags and functions before calling it.
   2334 		 */
   2335 		sc->sc_flags |= WM_F_LOCK_EECD;
   2336 		sc->nvm.acquire = wm_get_eecd;
   2337 		sc->nvm.release = wm_put_eecd;
   2338 		if (reg & EECD_EE_TYPE) {
   2339 			/* SPI */
   2340 			sc->nvm.read = wm_nvm_read_spi;
   2341 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2342 			wm_nvm_set_addrbits_size_eecd(sc);
   2343 		} else {
   2344 			/* Microwire */
   2345 			sc->nvm.read = wm_nvm_read_uwire;
   2346 			if ((reg & EECD_EE_ABITS) != 0) {
   2347 				sc->sc_nvm_wordsize = 256;
   2348 				sc->sc_nvm_addrbits = 8;
   2349 			} else {
   2350 				sc->sc_nvm_wordsize = 64;
   2351 				sc->sc_nvm_addrbits = 6;
   2352 			}
   2353 		}
   2354 		break;
   2355 	case WM_T_82571:
   2356 	case WM_T_82572:
   2357 		/* SPI */
   2358 		sc->nvm.read = wm_nvm_read_eerd;
    2359 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2360 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2361 		wm_nvm_set_addrbits_size_eecd(sc);
   2362 		sc->phy.acquire = wm_get_swsm_semaphore;
   2363 		sc->phy.release = wm_put_swsm_semaphore;
   2364 		sc->nvm.acquire = wm_get_nvm_82571;
   2365 		sc->nvm.release = wm_put_nvm_82571;
   2366 		break;
   2367 	case WM_T_82573:
   2368 	case WM_T_82574:
   2369 	case WM_T_82583:
   2370 		sc->nvm.read = wm_nvm_read_eerd;
    2371 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 		if (sc->sc_type == WM_T_82573) {
   2373 			sc->phy.acquire = wm_get_swsm_semaphore;
   2374 			sc->phy.release = wm_put_swsm_semaphore;
   2375 			sc->nvm.acquire = wm_get_nvm_82571;
   2376 			sc->nvm.release = wm_put_nvm_82571;
   2377 		} else {
   2378 			/* Both PHY and NVM use the same semaphore. */
   2379 			sc->phy.acquire = sc->nvm.acquire
   2380 			    = wm_get_swfwhw_semaphore;
   2381 			sc->phy.release = sc->nvm.release
   2382 			    = wm_put_swfwhw_semaphore;
   2383 		}
   2384 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2385 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2386 			sc->sc_nvm_wordsize = 2048;
   2387 		} else {
   2388 			/* SPI */
   2389 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2390 			wm_nvm_set_addrbits_size_eecd(sc);
   2391 		}
   2392 		break;
   2393 	case WM_T_82575:
   2394 	case WM_T_82576:
   2395 	case WM_T_82580:
   2396 	case WM_T_I350:
   2397 	case WM_T_I354:
   2398 	case WM_T_80003:
   2399 		/* SPI */
   2400 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2401 		wm_nvm_set_addrbits_size_eecd(sc);
   2402 		if ((sc->sc_type == WM_T_80003)
   2403 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2404 			sc->nvm.read = wm_nvm_read_eerd;
   2405 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2406 		} else {
   2407 			sc->nvm.read = wm_nvm_read_spi;
   2408 			sc->sc_flags |= WM_F_LOCK_EECD;
   2409 		}
   2410 		sc->phy.acquire = wm_get_phy_82575;
   2411 		sc->phy.release = wm_put_phy_82575;
   2412 		sc->nvm.acquire = wm_get_nvm_80003;
   2413 		sc->nvm.release = wm_put_nvm_80003;
   2414 		break;
   2415 	case WM_T_ICH8:
   2416 	case WM_T_ICH9:
   2417 	case WM_T_ICH10:
   2418 	case WM_T_PCH:
   2419 	case WM_T_PCH2:
   2420 	case WM_T_PCH_LPT:
   2421 		sc->nvm.read = wm_nvm_read_ich8;
   2422 		/* FLASH */
   2423 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2424 		sc->sc_nvm_wordsize = 2048;
   2425 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2426 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2427 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2428 			aprint_error_dev(sc->sc_dev,
   2429 			    "can't map FLASH registers\n");
   2430 			goto out;
   2431 		}
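         		/*
          		 * GFPREG describes the usable flash region in sector
          		 * units: the low field holds the first sector and the
          		 * field at bit 16 the last. Convert sectors to bytes,
          		 * then to 16-bit words, and split the result across
          		 * the two NVM banks.
          		 */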
   2432 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2433 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2434 		    ICH_FLASH_SECTOR_SIZE;
   2435 		sc->sc_ich8_flash_bank_size =
   2436 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2437 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2438 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2439 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2440 		sc->sc_flashreg_offset = 0;
   2441 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2442 		sc->phy.release = wm_put_swflag_ich8lan;
   2443 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2444 		sc->nvm.release = wm_put_nvm_ich8lan;
   2445 		break;
   2446 	case WM_T_PCH_SPT:
   2447 	case WM_T_PCH_CNP:
   2448 		sc->nvm.read = wm_nvm_read_spt;
   2449 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2450 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2451 		sc->sc_flasht = sc->sc_st;
   2452 		sc->sc_flashh = sc->sc_sh;
   2453 		sc->sc_ich8_flash_base = 0;
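         		/*
          		 * The NVM size is strapped: bits 1-5 of WMREG_STRAP
          		 * encode (size / NVM_SIZE_MULTIPLIER) - 1, so e.g. a
          		 * strap field of 0x1f means 32 * NVM_SIZE_MULTIPLIER
          		 * bytes.
          		 */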
   2454 		sc->sc_nvm_wordsize =
   2455 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2456 		    * NVM_SIZE_MULTIPLIER;
    2457 		/* It is the size in bytes; we want words */
   2458 		sc->sc_nvm_wordsize /= 2;
   2459 		/* Assume 2 banks */
   2460 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2461 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2462 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2463 		sc->phy.release = wm_put_swflag_ich8lan;
   2464 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2465 		sc->nvm.release = wm_put_nvm_ich8lan;
   2466 		break;
   2467 	case WM_T_I210:
   2468 	case WM_T_I211:
    2469 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2470 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2471 		if (wm_nvm_flash_presence_i210(sc)) {
   2472 			sc->nvm.read = wm_nvm_read_eerd;
   2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2474 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2475 			wm_nvm_set_addrbits_size_eecd(sc);
   2476 		} else {
   2477 			sc->nvm.read = wm_nvm_read_invm;
   2478 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2479 			sc->sc_nvm_wordsize = INVM_SIZE;
   2480 		}
   2481 		sc->phy.acquire = wm_get_phy_82575;
   2482 		sc->phy.release = wm_put_phy_82575;
   2483 		sc->nvm.acquire = wm_get_nvm_80003;
   2484 		sc->nvm.release = wm_put_nvm_80003;
   2485 		break;
   2486 	default:
   2487 		break;
   2488 	}
   2489 
   2490 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2491 	switch (sc->sc_type) {
   2492 	case WM_T_82571:
   2493 	case WM_T_82572:
   2494 		reg = CSR_READ(sc, WMREG_SWSM2);
   2495 		if ((reg & SWSM2_LOCK) == 0) {
   2496 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2497 			force_clear_smbi = true;
   2498 		} else
   2499 			force_clear_smbi = false;
   2500 		break;
   2501 	case WM_T_82573:
   2502 	case WM_T_82574:
   2503 	case WM_T_82583:
   2504 		force_clear_smbi = true;
   2505 		break;
   2506 	default:
   2507 		force_clear_smbi = false;
   2508 		break;
   2509 	}
   2510 	if (force_clear_smbi) {
   2511 		reg = CSR_READ(sc, WMREG_SWSM);
   2512 		if ((reg & SWSM_SMBI) != 0)
   2513 			aprint_error_dev(sc->sc_dev,
   2514 			    "Please update the Bootagent\n");
   2515 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2516 	}
   2517 
   2518 	/*
    2519 	 * Defer printing the EEPROM type until after verifying the checksum.
   2520 	 * This allows the EEPROM type to be printed correctly in the case
   2521 	 * that no EEPROM is attached.
   2522 	 */
   2523 	/*
   2524 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2525 	 * this for later, so we can fail future reads from the EEPROM.
   2526 	 */
   2527 	if (wm_nvm_validate_checksum(sc)) {
   2528 		/*
   2529 		 * Read twice again because some PCI-e parts fail the
    2530 		 * first check due to the link being in a sleep state.
   2531 		 */
   2532 		if (wm_nvm_validate_checksum(sc))
   2533 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2534 	}
   2535 
   2536 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2537 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2538 	else {
   2539 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2540 		    sc->sc_nvm_wordsize);
   2541 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2542 			aprint_verbose("iNVM");
   2543 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2544 			aprint_verbose("FLASH(HW)");
   2545 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2546 			aprint_verbose("FLASH");
   2547 		else {
   2548 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2549 				eetype = "SPI";
   2550 			else
   2551 				eetype = "MicroWire";
   2552 			aprint_verbose("(%d address bits) %s EEPROM",
   2553 			    sc->sc_nvm_addrbits, eetype);
   2554 		}
   2555 	}
   2556 	wm_nvm_version(sc);
   2557 	aprint_verbose("\n");
   2558 
   2559 	/*
    2560 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2561 	 * incorrect.
   2562 	 */
   2563 	wm_gmii_setup_phytype(sc, 0, 0);
   2564 
   2565 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2566 	switch (sc->sc_type) {
   2567 	case WM_T_ICH8:
   2568 	case WM_T_ICH9:
   2569 	case WM_T_ICH10:
   2570 	case WM_T_PCH:
   2571 	case WM_T_PCH2:
   2572 	case WM_T_PCH_LPT:
   2573 	case WM_T_PCH_SPT:
   2574 	case WM_T_PCH_CNP:
   2575 		apme_mask = WUC_APME;
   2576 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2577 		if ((eeprom_data & apme_mask) != 0)
   2578 			sc->sc_flags |= WM_F_WOL;
   2579 		break;
   2580 	default:
   2581 		break;
   2582 	}
   2583 
   2584 	/* Reset the chip to a known state. */
   2585 	wm_reset(sc);
   2586 
   2587 	/*
   2588 	 * Check for I21[01] PLL workaround.
   2589 	 *
   2590 	 * Three cases:
   2591 	 * a) Chip is I211.
   2592 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2593 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2594 	 */
   2595 	if (sc->sc_type == WM_T_I211)
   2596 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2597 	if (sc->sc_type == WM_T_I210) {
   2598 		if (!wm_nvm_flash_presence_i210(sc))
   2599 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2600 		else if ((sc->sc_nvm_ver_major < 3)
   2601 		    || ((sc->sc_nvm_ver_major == 3)
   2602 			&& (sc->sc_nvm_ver_minor < 25))) {
   2603 			aprint_verbose_dev(sc->sc_dev,
   2604 			    "ROM image version %d.%d is older than 3.25\n",
   2605 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2606 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2607 		}
   2608 	}
   2609 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2610 		wm_pll_workaround_i210(sc);
   2611 
   2612 	wm_get_wakeup(sc);
   2613 
   2614 	/* Non-AMT based hardware can now take control from firmware */
   2615 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2616 		wm_get_hw_control(sc);
   2617 
   2618 	/*
   2619 	 * Read the Ethernet address from the EEPROM, if not first found
   2620 	 * in device properties.
   2621 	 */
   2622 	ea = prop_dictionary_get(dict, "mac-address");
   2623 	if (ea != NULL) {
   2624 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2625 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2626 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2627 	} else {
   2628 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2629 			aprint_error_dev(sc->sc_dev,
   2630 			    "unable to read Ethernet address\n");
   2631 			goto out;
   2632 		}
   2633 	}
   2634 
   2635 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2636 	    ether_sprintf(enaddr));
   2637 
   2638 	/*
   2639 	 * Read the config info from the EEPROM, and set up various
   2640 	 * bits in the control registers based on their contents.
   2641 	 */
   2642 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2643 	if (pn != NULL) {
   2644 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2645 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2646 	} else {
   2647 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2648 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2649 			goto out;
   2650 		}
   2651 	}
   2652 
   2653 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2654 	if (pn != NULL) {
   2655 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2656 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2657 	} else {
   2658 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2659 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2660 			goto out;
   2661 		}
   2662 	}
   2663 
   2664 	/* check for WM_F_WOL */
   2665 	switch (sc->sc_type) {
   2666 	case WM_T_82542_2_0:
   2667 	case WM_T_82542_2_1:
   2668 	case WM_T_82543:
   2669 		/* dummy? */
   2670 		eeprom_data = 0;
   2671 		apme_mask = NVM_CFG3_APME;
   2672 		break;
   2673 	case WM_T_82544:
   2674 		apme_mask = NVM_CFG2_82544_APM_EN;
   2675 		eeprom_data = cfg2;
   2676 		break;
   2677 	case WM_T_82546:
   2678 	case WM_T_82546_3:
   2679 	case WM_T_82571:
   2680 	case WM_T_82572:
   2681 	case WM_T_82573:
   2682 	case WM_T_82574:
   2683 	case WM_T_82583:
   2684 	case WM_T_80003:
   2685 	case WM_T_82575:
   2686 	case WM_T_82576:
   2687 		apme_mask = NVM_CFG3_APME;
   2688 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2689 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2690 		break;
   2691 	case WM_T_82580:
   2692 	case WM_T_I350:
   2693 	case WM_T_I354:
   2694 	case WM_T_I210:
   2695 	case WM_T_I211:
   2696 		apme_mask = NVM_CFG3_APME;
   2697 		wm_nvm_read(sc,
   2698 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2699 		    1, &eeprom_data);
   2700 		break;
   2701 	case WM_T_ICH8:
   2702 	case WM_T_ICH9:
   2703 	case WM_T_ICH10:
   2704 	case WM_T_PCH:
   2705 	case WM_T_PCH2:
   2706 	case WM_T_PCH_LPT:
   2707 	case WM_T_PCH_SPT:
   2708 	case WM_T_PCH_CNP:
    2709 		/* Already checked before wm_reset() */
   2710 		apme_mask = eeprom_data = 0;
   2711 		break;
   2712 	default: /* XXX 82540 */
   2713 		apme_mask = NVM_CFG3_APME;
   2714 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2715 		break;
   2716 	}
   2717 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2718 	if ((eeprom_data & apme_mask) != 0)
   2719 		sc->sc_flags |= WM_F_WOL;
   2720 
   2721 	/*
   2722 	 * We have the EEPROM settings; now apply the special cases
   2723 	 * where the EEPROM may be wrong or the board won't support
   2724 	 * wake-on-LAN on a particular port.
   2725 	 */
   2726 	switch (sc->sc_pcidevid) {
   2727 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2728 		sc->sc_flags &= ~WM_F_WOL;
   2729 		break;
   2730 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2731 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2732 		/* Wake events only supported on port A for dual fiber
   2733 		 * regardless of eeprom setting */
   2734 		if (sc->sc_funcid == 1)
   2735 			sc->sc_flags &= ~WM_F_WOL;
   2736 		break;
   2737 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2738 		/* If quad port adapter, disable WoL on all but port A */
   2739 		if (sc->sc_funcid != 0)
   2740 			sc->sc_flags &= ~WM_F_WOL;
   2741 		break;
   2742 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2743 		/* Wake events only supported on port A for dual fiber
   2744 		 * regardless of eeprom setting */
   2745 		if (sc->sc_funcid == 1)
   2746 			sc->sc_flags &= ~WM_F_WOL;
   2747 		break;
   2748 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2749 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2750 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2751 		/* If quad port adapter, disable WoL on all but port A */
   2752 		if (sc->sc_funcid != 0)
   2753 			sc->sc_flags &= ~WM_F_WOL;
   2754 		break;
   2755 	}
   2756 
   2757 	if (sc->sc_type >= WM_T_82575) {
   2758 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2759 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2760 			    nvmword);
   2761 			if ((sc->sc_type == WM_T_82575) ||
   2762 			    (sc->sc_type == WM_T_82576)) {
   2763 				/* Check NVM for autonegotiation */
   2764 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2765 				    != 0)
   2766 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2767 			}
   2768 			if ((sc->sc_type == WM_T_82575) ||
   2769 			    (sc->sc_type == WM_T_I350)) {
   2770 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2771 					sc->sc_flags |= WM_F_MAS;
   2772 			}
   2773 		}
   2774 	}
   2775 
   2776 	/*
   2777 	 * XXX need special handling for some multiple port cards
   2778 	 * to disable a particular port.
   2779 	 */
   2780 
   2781 	if (sc->sc_type >= WM_T_82544) {
   2782 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2783 		if (pn != NULL) {
   2784 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2785 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2786 		} else {
   2787 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2788 				aprint_error_dev(sc->sc_dev,
   2789 				    "unable to read SWDPIN\n");
   2790 				goto out;
   2791 			}
   2792 		}
   2793 	}
   2794 
   2795 	if (cfg1 & NVM_CFG1_ILOS)
   2796 		sc->sc_ctrl |= CTRL_ILOS;
   2797 
   2798 	/*
   2799 	 * XXX
   2800 	 * This code isn't correct because pins 2 and 3 are located
   2801 	 * at different positions on newer chips. Check all datasheets.
   2802 	 *
   2803 	 * Until this is resolved, only do it for chips up to the 82580.
   2804 	 */
   2805 	if (sc->sc_type <= WM_T_82580) {
   2806 		if (sc->sc_type >= WM_T_82544) {
   2807 			sc->sc_ctrl |=
   2808 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2809 			    CTRL_SWDPIO_SHIFT;
   2810 			sc->sc_ctrl |=
   2811 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2812 			    CTRL_SWDPINS_SHIFT;
   2813 		} else {
   2814 			sc->sc_ctrl |=
   2815 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2816 			    CTRL_SWDPIO_SHIFT;
   2817 		}
   2818 	}
   2819 
   2820 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2821 		wm_nvm_read(sc,
   2822 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2823 		    1, &nvmword);
   2824 		if (nvmword & NVM_CFG3_ILOS)
   2825 			sc->sc_ctrl |= CTRL_ILOS;
   2826 	}
   2827 
   2828 #if 0
   2829 	if (sc->sc_type >= WM_T_82544) {
   2830 		if (cfg1 & NVM_CFG1_IPS0)
   2831 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2832 		if (cfg1 & NVM_CFG1_IPS1)
   2833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2834 		sc->sc_ctrl_ext |=
   2835 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2836 		    CTRL_EXT_SWDPIO_SHIFT;
   2837 		sc->sc_ctrl_ext |=
   2838 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2839 		    CTRL_EXT_SWDPINS_SHIFT;
   2840 	} else {
   2841 		sc->sc_ctrl_ext |=
   2842 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2843 		    CTRL_EXT_SWDPIO_SHIFT;
   2844 	}
   2845 #endif
   2846 
   2847 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2848 #if 0
   2849 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2850 #endif
   2851 
   2852 	if (sc->sc_type == WM_T_PCH) {
   2853 		uint16_t val;
   2854 
   2855 		/* Save the NVM K1 bit setting */
   2856 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2857 
   2858 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2859 			sc->sc_nvm_k1_enabled = 1;
   2860 		else
   2861 			sc->sc_nvm_k1_enabled = 0;
   2862 	}
   2863 
   2864 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2865 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2866 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2867 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2868 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2869 	    || sc->sc_type == WM_T_82573
   2870 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2871 		/* Copper only */
   2872 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2873 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
   2874 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
   2875 	    || (sc->sc_type == WM_T_I211)) {
   2876 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2877 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2878 		switch (link_mode) {
   2879 		case CTRL_EXT_LINK_MODE_1000KX:
   2880 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2881 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2882 			break;
   2883 		case CTRL_EXT_LINK_MODE_SGMII:
   2884 			if (wm_sgmii_uses_mdio(sc)) {
   2885 				aprint_normal_dev(sc->sc_dev,
   2886 				    "SGMII(MDIO)\n");
   2887 				sc->sc_flags |= WM_F_SGMII;
   2888 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2889 				break;
   2890 			}
   2891 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2892 			/*FALLTHROUGH*/
   2893 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2894 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2895 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2896 				if (link_mode
   2897 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2898 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2899 					sc->sc_flags |= WM_F_SGMII;
   2900 					aprint_verbose_dev(sc->sc_dev,
   2901 					    "SGMII\n");
   2902 				} else {
   2903 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2904 					aprint_verbose_dev(sc->sc_dev,
   2905 					    "SERDES\n");
   2906 				}
   2907 				break;
   2908 			}
   2909 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2910 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2911 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2912 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2913 				sc->sc_flags |= WM_F_SGMII;
   2914 			}
   2915 			/* Do not change link mode for 100BaseFX */
   2916 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2917 				break;
   2918 
   2919 			/* Change current link mode setting */
   2920 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2921 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2922 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2923 			else
   2924 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2925 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2926 			break;
   2927 		case CTRL_EXT_LINK_MODE_GMII:
   2928 		default:
   2929 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2930 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2931 			break;
   2932 		}
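		/*
		 * Editor's note (summary, not in the original source): at
		 * this point sc_mediatype reflects the strapped link mode:
		 * 1000KX maps to SERDES, SGMII to copper, SGMII/SERDES with
		 * an SFP module is resolved via wm_sfp_get_media_type(),
		 * and GMII (the default) is copper.
		 */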
   2933 
   2934 		/* Enable the I2C interface only when using SGMII. */
   2935 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2936 			reg |= CTRL_EXT_I2C_ENA;
   2937 		else
   2938 			reg &= ~CTRL_EXT_I2C_ENA;
   2939 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2940 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2941 			if (!wm_sgmii_uses_mdio(sc))
   2942 				wm_gmii_setup_phytype(sc, 0, 0);
   2943 			wm_reset_mdicnfg_82580(sc);
   2944 		}
   2945 	} else if (sc->sc_type < WM_T_82543 ||
   2946 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2947 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2948 			aprint_error_dev(sc->sc_dev,
   2949 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2950 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2951 		}
   2952 	} else {
   2953 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2954 			aprint_error_dev(sc->sc_dev,
   2955 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2956 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2957 		}
   2958 	}
   2959 
   2960 	if (sc->sc_type >= WM_T_PCH2)
   2961 		sc->sc_flags |= WM_F_EEE;
   2962 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2963 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2964 		/* XXX: Need special handling for I354. (not yet) */
   2965 		if (sc->sc_type != WM_T_I354)
   2966 			sc->sc_flags |= WM_F_EEE;
   2967 	}
   2968 
   2969 	/*
   2970 	 * The I350 has a bug where it always strips the CRC whether
   2971 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2972 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   2973 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2974 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2975 		sc->sc_flags |= WM_F_CRC_STRIP;
   2976 
   2977 	/* Set device properties (macflags) */
   2978 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2979 
   2980 	if (sc->sc_flags != 0) {
   2981 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2982 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2983 	}
   2984 
   2985 #ifdef WM_MPSAFE
   2986 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2987 #else
   2988 	sc->sc_core_lock = NULL;
   2989 #endif
   2990 
   2991 	/* Initialize the media structures accordingly. */
   2992 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2993 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2994 	else
   2995 		wm_tbi_mediainit(sc); /* All others */
   2996 
   2997 	ifp = &sc->sc_ethercom.ec_if;
   2998 	xname = device_xname(sc->sc_dev);
   2999 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3000 	ifp->if_softc = sc;
   3001 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3002 #ifdef WM_MPSAFE
   3003 	ifp->if_extflags = IFEF_MPSAFE;
   3004 #endif
   3005 	ifp->if_ioctl = wm_ioctl;
   3006 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3007 		ifp->if_start = wm_nq_start;
   3008 		/*
   3009 		 * When the number of CPUs is one and the controller can use
   3010 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
   3011 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
   3012 		 * the other for link status changes.
   3013 		 * In this situation, wm_nq_transmit() is disadvantageous
   3014 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3015 		 */
   3016 		if (wm_is_using_multiqueue(sc))
   3017 			ifp->if_transmit = wm_nq_transmit;
   3018 	} else {
   3019 		ifp->if_start = wm_start;
   3020 		/*
   3021 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   3022 		 */
   3023 		if (wm_is_using_multiqueue(sc))
   3024 			ifp->if_transmit = wm_transmit;
   3025 	}
   3026 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3027 	ifp->if_init = wm_init;
   3028 	ifp->if_stop = wm_stop;
   3029 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3030 	IFQ_SET_READY(&ifp->if_snd);
   3031 
   3032 	/* Check for jumbo frame */
   3033 	switch (sc->sc_type) {
   3034 	case WM_T_82573:
   3035 		/* XXX limited to 9234 if ASPM is disabled */
   3036 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3037 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3038 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3039 		break;
   3040 	case WM_T_82571:
   3041 	case WM_T_82572:
   3042 	case WM_T_82574:
   3043 	case WM_T_82583:
   3044 	case WM_T_82575:
   3045 	case WM_T_82576:
   3046 	case WM_T_82580:
   3047 	case WM_T_I350:
   3048 	case WM_T_I354:
   3049 	case WM_T_I210:
   3050 	case WM_T_I211:
   3051 	case WM_T_80003:
   3052 	case WM_T_ICH9:
   3053 	case WM_T_ICH10:
   3054 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3055 	case WM_T_PCH_LPT:
   3056 	case WM_T_PCH_SPT:
   3057 	case WM_T_PCH_CNP:
   3058 		/* XXX limited to 9234 */
   3059 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3060 		break;
   3061 	case WM_T_PCH:
   3062 		/* XXX limited to 4096 */
   3063 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3064 		break;
   3065 	case WM_T_82542_2_0:
   3066 	case WM_T_82542_2_1:
   3067 	case WM_T_ICH8:
   3068 		/* No support for jumbo frame */
   3069 		break;
   3070 	default:
   3071 		/* ETHER_MAX_LEN_JUMBO */
   3072 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3073 		break;
   3074 	}
   3075 
   3076 	/* If we're an i82543 or greater, we can support VLANs. */
   3077 	if (sc->sc_type >= WM_T_82543) {
   3078 		sc->sc_ethercom.ec_capabilities |=
   3079 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3080 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3081 	}
   3082 
   3083 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3084 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3085 
   3086 	/*
   3087 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   3088 	 * on i82543 and later.
   3089 	 */
   3090 	if (sc->sc_type >= WM_T_82543) {
   3091 		ifp->if_capabilities |=
   3092 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3093 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3094 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3095 		    IFCAP_CSUM_TCPv6_Tx |
   3096 		    IFCAP_CSUM_UDPv6_Tx;
   3097 	}
   3098 
   3099 	/*
   3100 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3101 	 *
   3102 	 *	82541GI (8086:1076) ... no
   3103 	 *	82572EI (8086:10b9) ... yes
   3104 	 */
   3105 	if (sc->sc_type >= WM_T_82571) {
   3106 		ifp->if_capabilities |=
   3107 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3108 	}
   3109 
   3110 	/*
   3111 	 * If we're an i82544 or greater (except i82547), we can do
   3112 	 * TCP segmentation offload.
   3113 	 */
   3114 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3115 		ifp->if_capabilities |= IFCAP_TSOv4;
   3116 	}
   3117 
   3118 	if (sc->sc_type >= WM_T_82571) {
   3119 		ifp->if_capabilities |= IFCAP_TSOv6;
   3120 	}
   3121 
   3122 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3123 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3124 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3125 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3126 
   3127 	/* Attach the interface. */
   3128 	if_initialize(ifp);
   3129 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3130 	ether_ifattach(ifp, enaddr);
   3131 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3132 	if_register(ifp);
   3133 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3134 	    RND_FLAG_DEFAULT);
   3135 
   3136 #ifdef WM_EVENT_COUNTERS
   3137 	/* Attach event counters. */
   3138 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3139 	    NULL, xname, "linkintr");
   3140 
   3141 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3142 	    NULL, xname, "tx_xoff");
   3143 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3144 	    NULL, xname, "tx_xon");
   3145 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3146 	    NULL, xname, "rx_xoff");
   3147 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3148 	    NULL, xname, "rx_xon");
   3149 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3150 	    NULL, xname, "rx_macctl");
   3151 #endif /* WM_EVENT_COUNTERS */
   3152 
   3153 	sc->sc_txrx_use_workqueue = false;
   3154 
   3155 	if (wm_phy_need_linkdown_discard(sc))
   3156 		wm_set_linkdown_discard(sc);
   3157 
   3158 	wm_init_sysctls(sc);
   3159 
   3160 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3161 		pmf_class_network_register(self, ifp);
   3162 	else
   3163 		aprint_error_dev(self, "couldn't establish power handler\n");
   3164 
   3165 	sc->sc_flags |= WM_F_ATTACHED;
   3166 out:
   3167 	return;
   3168 }
   3169 
   3170 /* The detach function (ca_detach) */
   3171 static int
   3172 wm_detach(device_t self, int flags __unused)
   3173 {
   3174 	struct wm_softc *sc = device_private(self);
   3175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3176 	int i;
   3177 
   3178 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3179 		return 0;
   3180 
   3181 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
   3182 	wm_stop(ifp, 1);
   3183 
   3184 	pmf_device_deregister(self);
   3185 
   3186 	sysctl_teardown(&sc->sc_sysctllog);
   3187 
   3188 #ifdef WM_EVENT_COUNTERS
   3189 	evcnt_detach(&sc->sc_ev_linkintr);
   3190 
   3191 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3192 	evcnt_detach(&sc->sc_ev_tx_xon);
   3193 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3194 	evcnt_detach(&sc->sc_ev_rx_xon);
   3195 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3196 #endif /* WM_EVENT_COUNTERS */
   3197 
   3198 	rnd_detach_source(&sc->rnd_source);
   3199 
   3200 	/* Tell the firmware about the release */
   3201 	WM_CORE_LOCK(sc);
   3202 	wm_release_manageability(sc);
   3203 	wm_release_hw_control(sc);
   3204 	wm_enable_wakeup(sc);
   3205 	WM_CORE_UNLOCK(sc);
   3206 
   3207 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3208 
   3209 	ether_ifdetach(ifp);
   3210 	if_detach(ifp);
   3211 	if_percpuq_destroy(sc->sc_ipq);
   3212 
   3213 	/* Delete all remaining media. */
   3214 	ifmedia_fini(&sc->sc_mii.mii_media);
   3215 
   3216 	/* Unload RX dmamaps and free mbufs */
   3217 	for (i = 0; i < sc->sc_nqueues; i++) {
   3218 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3219 		mutex_enter(rxq->rxq_lock);
   3220 		wm_rxdrain(rxq);
   3221 		mutex_exit(rxq->rxq_lock);
   3222 	}
   3223 	/* Must unlock here */
   3224 
   3225 	/* Disestablish the interrupt handler */
   3226 	for (i = 0; i < sc->sc_nintrs; i++) {
   3227 		if (sc->sc_ihs[i] != NULL) {
   3228 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3229 			sc->sc_ihs[i] = NULL;
   3230 		}
   3231 	}
   3232 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3233 
   3234 	/* wm_stop() ensures the workqueue is stopped. */
   3235 	workqueue_destroy(sc->sc_queue_wq);
   3236 
   3237 	for (i = 0; i < sc->sc_nqueues; i++)
   3238 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3239 
   3240 	wm_free_txrx_queues(sc);
   3241 
   3242 	/* Unmap the registers */
   3243 	if (sc->sc_ss) {
   3244 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3245 		sc->sc_ss = 0;
   3246 	}
   3247 	if (sc->sc_ios) {
   3248 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3249 		sc->sc_ios = 0;
   3250 	}
   3251 	if (sc->sc_flashs) {
   3252 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3253 		sc->sc_flashs = 0;
   3254 	}
   3255 
   3256 	if (sc->sc_core_lock)
   3257 		mutex_obj_free(sc->sc_core_lock);
   3258 	if (sc->sc_ich_phymtx)
   3259 		mutex_obj_free(sc->sc_ich_phymtx);
   3260 	if (sc->sc_ich_nvmmtx)
   3261 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3262 
   3263 	return 0;
   3264 }
   3265 
   3266 static bool
   3267 wm_suspend(device_t self, const pmf_qual_t *qual)
   3268 {
   3269 	struct wm_softc *sc = device_private(self);
   3270 
   3271 	wm_release_manageability(sc);
   3272 	wm_release_hw_control(sc);
   3273 	wm_enable_wakeup(sc);
   3274 
   3275 	return true;
   3276 }
   3277 
   3278 static bool
   3279 wm_resume(device_t self, const pmf_qual_t *qual)
   3280 {
   3281 	struct wm_softc *sc = device_private(self);
   3282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3283 	pcireg_t reg;
   3284 	char buf[256];
   3285 
   3286 	reg = CSR_READ(sc, WMREG_WUS);
   3287 	if (reg != 0) {
   3288 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3289 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3290 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3291 	}
   3292 
   3293 	if (sc->sc_type >= WM_T_PCH2)
   3294 		wm_resume_workarounds_pchlan(sc);
   3295 	if ((ifp->if_flags & IFF_UP) == 0) {
   3296 		/* >= PCH_SPT hardware workaround before reset. */
   3297 		if (sc->sc_type >= WM_T_PCH_SPT)
   3298 			wm_flush_desc_rings(sc);
   3299 
   3300 		wm_reset(sc);
   3301 		/* Non-AMT based hardware can now take control from firmware */
   3302 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3303 			wm_get_hw_control(sc);
   3304 		wm_init_manageability(sc);
   3305 	} else {
   3306 		/*
   3307 		 * We called pmf_class_network_register(), so if_init() is
   3308 		 * automatically called when IFF_UP. wm_reset(),
   3309 		 * wm_get_hw_control() and wm_init_manageability() are called
   3310 		 * via wm_init().
   3311 		 */
   3312 	}
   3313 
   3314 	return true;
   3315 }
   3316 
   3317 /*
   3318  * wm_watchdog:		[ifnet interface function]
   3319  *
   3320  *	Watchdog timer handler.
   3321  */
   3322 static void
   3323 wm_watchdog(struct ifnet *ifp)
   3324 {
   3325 	int qid;
   3326 	struct wm_softc *sc = ifp->if_softc;
   3327 	uint16_t hang_queue = 0; /* Bitmap of hung queues; max is the 82576's 16. */
   3328 
   3329 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3330 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3331 
   3332 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3333 	}
   3334 
   3335 	/* If any queue hung up, reset the interface. */
   3336 	if (hang_queue != 0) {
   3337 		(void)wm_init(ifp);
   3338 
   3339 		/*
   3340 		 * There is still some upper-layer processing that calls
   3341 		 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
   3342 		 */
   3343 		/* Try to get more packets going. */
   3344 		ifp->if_start(ifp);
   3345 	}
   3346 }
   3347 
   3348 
   3349 static void
   3350 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3351 {
   3352 
   3353 	mutex_enter(txq->txq_lock);
   3354 	if (txq->txq_sending &&
   3355 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3356 		wm_watchdog_txq_locked(ifp, txq, hang);
   3357 
   3358 	mutex_exit(txq->txq_lock);
   3359 }
   3360 
   3361 static void
   3362 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3363     uint16_t *hang)
   3364 {
   3365 	struct wm_softc *sc = ifp->if_softc;
   3366 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3367 
   3368 	KASSERT(mutex_owned(txq->txq_lock));
   3369 
   3370 	/*
   3371 	 * Since we're using delayed interrupts, sweep up
   3372 	 * before we report an error.
   3373 	 */
   3374 	wm_txeof(txq, UINT_MAX);
   3375 
   3376 	if (txq->txq_sending)
   3377 		*hang |= __BIT(wmq->wmq_id);
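	/*
	 * Editor's note: *hang is a bitmap; bit N set here means Tx queue N
	 * still has unfinished work, and wm_watchdog() reinitializes the
	 * interface if any bit ends up set.
	 */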
   3378 
   3379 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3380 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3381 		    device_xname(sc->sc_dev));
   3382 	} else {
   3383 #ifdef WM_DEBUG
   3384 		int i, j;
   3385 		struct wm_txsoft *txs;
   3386 #endif
   3387 		log(LOG_ERR,
   3388 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3389 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3390 		    txq->txq_next);
   3391 		if_statinc(ifp, if_oerrors);
   3392 #ifdef WM_DEBUG
   3393 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3394 		    i = WM_NEXTTXS(txq, i)) {
   3395 			txs = &txq->txq_soft[i];
   3396 			printf("txs %d tx %d -> %d\n",
   3397 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3398 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3399 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3400 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3401 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3402 					printf("\t %#08x%08x\n",
   3403 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3404 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3405 				} else {
   3406 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3407 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3408 					    txq->txq_descs[j].wtx_addr.wa_low);
   3409 					printf("\t %#04x%02x%02x%08x\n",
   3410 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3411 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3412 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3413 					    txq->txq_descs[j].wtx_cmdlen);
   3414 				}
   3415 				if (j == txs->txs_lastdesc)
   3416 					break;
   3417 			}
   3418 		}
   3419 #endif
   3420 	}
   3421 }
   3422 
   3423 /*
   3424  * wm_tick:
   3425  *
   3426  *	One second timer, used to check link status, sweep up
   3427  *	completed transmit jobs, etc.
   3428  */
   3429 static void
   3430 wm_tick(void *arg)
   3431 {
   3432 	struct wm_softc *sc = arg;
   3433 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3434 #ifndef WM_MPSAFE
   3435 	int s = splnet();
   3436 #endif
   3437 
   3438 	WM_CORE_LOCK(sc);
   3439 
   3440 	if (sc->sc_core_stopping) {
   3441 		WM_CORE_UNLOCK(sc);
   3442 #ifndef WM_MPSAFE
   3443 		splx(s);
   3444 #endif
   3445 		return;
   3446 	}
   3447 
   3448 	if (sc->sc_type >= WM_T_82542_2_1) {
   3449 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3450 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3451 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3452 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3453 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3454 	}
   3455 
   3456 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3457 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3458 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3459 	    + CSR_READ(sc, WMREG_CRCERRS)
   3460 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3461 	    + CSR_READ(sc, WMREG_SYMERRC)
   3462 	    + CSR_READ(sc, WMREG_RXERRC)
   3463 	    + CSR_READ(sc, WMREG_SEC)
   3464 	    + CSR_READ(sc, WMREG_CEXTERR)
   3465 	    + CSR_READ(sc, WMREG_RLEC));
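	/*
	 * Editor's note (assumption based on Intel documentation): the
	 * statistics registers read above are clear-on-read, so each
	 * 1-second tick only adds the counts accumulated since the
	 * previous tick.
	 */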
   3466 	/*
   3467 	 * WMREG_RNBC is incremented when no receive buffers are available in
   3468 	 * host memory. It is not the number of dropped packets, because the
   3469 	 * Ethernet controller can still receive packets in that case as long
   3470 	 * as there is space in the PHY's FIFO.
   3471 	 *
   3472 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead of
   3473 	 * if_iqdrops.
   3474 	 */
   3475 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3476 	IF_STAT_PUTREF(ifp);
   3477 
   3478 	if (sc->sc_flags & WM_F_HAS_MII)
   3479 		mii_tick(&sc->sc_mii);
   3480 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3481 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3482 		wm_serdes_tick(sc);
   3483 	else
   3484 		wm_tbi_tick(sc);
   3485 
   3486 	WM_CORE_UNLOCK(sc);
   3487 
   3488 	wm_watchdog(ifp);
   3489 
   3490 	callout_schedule(&sc->sc_tick_ch, hz);
   3491 }
   3492 
   3493 static int
   3494 wm_ifflags_cb(struct ethercom *ec)
   3495 {
   3496 	struct ifnet *ifp = &ec->ec_if;
   3497 	struct wm_softc *sc = ifp->if_softc;
   3498 	u_short iffchange;
   3499 	int ecchange;
   3500 	bool needreset = false;
   3501 	int rc = 0;
   3502 
   3503 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3504 		device_xname(sc->sc_dev), __func__));
   3505 
   3506 	WM_CORE_LOCK(sc);
   3507 
   3508 	/*
   3509 	 * Check for if_flags.
   3510 	 * Main usage is to prevent linkdown when opening bpf.
   3511 	 */
   3512 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3513 	sc->sc_if_flags = ifp->if_flags;
   3514 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3515 		needreset = true;
   3516 		goto ec;
   3517 	}
   3518 
   3519 	/* iff related updates */
   3520 	if ((iffchange & IFF_PROMISC) != 0)
   3521 		wm_set_filter(sc);
   3522 
   3523 	wm_set_vlan(sc);
   3524 
   3525 ec:
   3526 	/* Check for ec_capenable. */
   3527 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3528 	sc->sc_ec_capenable = ec->ec_capenable;
   3529 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3530 		needreset = true;
   3531 		goto out;
   3532 	}
   3533 
   3534 	/* ec related updates */
   3535 	wm_set_eee(sc);
   3536 
   3537 out:
   3538 	if (needreset)
   3539 		rc = ENETRESET;
   3540 	WM_CORE_UNLOCK(sc);
   3541 
   3542 	return rc;
   3543 }
   3544 
   3545 static bool
   3546 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3547 {
   3548 
   3549 	switch (sc->sc_phytype) {
   3550 	case WMPHY_82577: /* ihphy */
   3551 	case WMPHY_82578: /* atphy */
   3552 	case WMPHY_82579: /* ihphy */
   3553 	case WMPHY_I217: /* ihphy */
   3554 	case WMPHY_82580: /* ihphy */
   3555 	case WMPHY_I350: /* ihphy */
   3556 		return true;
   3557 	default:
   3558 		return false;
   3559 	}
   3560 }
   3561 
   3562 static void
   3563 wm_set_linkdown_discard(struct wm_softc *sc)
   3564 {
   3565 
   3566 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3567 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3568 
   3569 		mutex_enter(txq->txq_lock);
   3570 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3571 		mutex_exit(txq->txq_lock);
   3572 	}
   3573 }
   3574 
   3575 static void
   3576 wm_clear_linkdown_discard(struct wm_softc *sc)
   3577 {
   3578 
   3579 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3580 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3581 
   3582 		mutex_enter(txq->txq_lock);
   3583 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3584 		mutex_exit(txq->txq_lock);
   3585 	}
   3586 }
   3587 
   3588 /*
   3589  * wm_ioctl:		[ifnet interface function]
   3590  *
   3591  *	Handle control requests from the operator.
   3592  */
   3593 static int
   3594 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3595 {
   3596 	struct wm_softc *sc = ifp->if_softc;
   3597 	struct ifreq *ifr = (struct ifreq *)data;
   3598 	struct ifaddr *ifa = (struct ifaddr *)data;
   3599 	struct sockaddr_dl *sdl;
   3600 	int s, error;
   3601 
   3602 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3603 		device_xname(sc->sc_dev), __func__));
   3604 
   3605 #ifndef WM_MPSAFE
   3606 	s = splnet();
   3607 #endif
   3608 	switch (cmd) {
   3609 	case SIOCSIFMEDIA:
   3610 		WM_CORE_LOCK(sc);
   3611 		/* Flow control requires full-duplex mode. */
   3612 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3613 		    (ifr->ifr_media & IFM_FDX) == 0)
   3614 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3615 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3616 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3617 				/* We can do both TXPAUSE and RXPAUSE. */
   3618 				ifr->ifr_media |=
   3619 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3620 			}
   3621 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3622 		}
   3623 		WM_CORE_UNLOCK(sc);
   3624 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3625 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3626 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3627 				wm_set_linkdown_discard(sc);
   3628 			else
   3629 				wm_clear_linkdown_discard(sc);
   3630 		}
   3631 		break;
   3632 	case SIOCINITIFADDR:
   3633 		WM_CORE_LOCK(sc);
   3634 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3635 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3636 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3637 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3638 			/* Unicast address is the first multicast entry */
   3639 			wm_set_filter(sc);
   3640 			error = 0;
   3641 			WM_CORE_UNLOCK(sc);
   3642 			break;
   3643 		}
   3644 		WM_CORE_UNLOCK(sc);
   3645 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
   3646 			wm_clear_linkdown_discard(sc);
   3647 		/*FALLTHROUGH*/
   3648 	default:
   3649 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3650 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
   3651 				wm_clear_linkdown_discard(sc);
   3652 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
   3653 				wm_set_linkdown_discard(sc);
   3654 			}
   3655 		}
   3656 #ifdef WM_MPSAFE
   3657 		s = splnet();
   3658 #endif
   3659 		/* It may call wm_start, so unlock here */
   3660 		error = ether_ioctl(ifp, cmd, data);
   3661 #ifdef WM_MPSAFE
   3662 		splx(s);
   3663 #endif
   3664 		if (error != ENETRESET)
   3665 			break;
   3666 
   3667 		error = 0;
   3668 
   3669 		if (cmd == SIOCSIFCAP)
   3670 			error = (*ifp->if_init)(ifp);
   3671 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3672 			;
   3673 		else if (ifp->if_flags & IFF_RUNNING) {
   3674 			/*
   3675 			 * Multicast list has changed; set the hardware filter
   3676 			 * accordingly.
   3677 			 */
   3678 			WM_CORE_LOCK(sc);
   3679 			wm_set_filter(sc);
   3680 			WM_CORE_UNLOCK(sc);
   3681 		}
   3682 		break;
   3683 	}
   3684 
   3685 #ifndef WM_MPSAFE
   3686 	splx(s);
   3687 #endif
   3688 	return error;
   3689 }
   3690 
   3691 /* MAC address related */
   3692 
   3693 /*
   3694  * Get the offset of the MAC address and return it.
   3695  * If an error occurs, use offset 0.
   3696  */
   3697 static uint16_t
   3698 wm_check_alt_mac_addr(struct wm_softc *sc)
   3699 {
   3700 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3701 	uint16_t offset = NVM_OFF_MACADDR;
   3702 
   3703 	/* Try to read alternative MAC address pointer */
   3704 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3705 		return 0;
   3706 
   3707 	/* Check whether the pointer is valid. */
   3708 	if ((offset == 0x0000) || (offset == 0xffff))
   3709 		return 0;
   3710 
   3711 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3712 	/*
   3713 	 * Check whether the alternative MAC address is valid.
   3714 	 * Some cards have a non-0xffff pointer but don't actually
   3715 	 * use an alternative MAC address.
   3716 	 *
   3717 	 * Check whether the broadcast bit is set.
   3718 	 */
   3719 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3720 		if (((myea[0] & 0xff) & 0x01) == 0)
   3721 			return offset; /* Found */
   3722 
   3723 	/* Not found */
   3724 	return 0;
   3725 }
   3726 
   3727 static int
   3728 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3729 {
   3730 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3731 	uint16_t offset = NVM_OFF_MACADDR;
   3732 	int do_invert = 0;
   3733 
   3734 	switch (sc->sc_type) {
   3735 	case WM_T_82580:
   3736 	case WM_T_I350:
   3737 	case WM_T_I354:
   3738 		/* EEPROM Top Level Partitioning */
   3739 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3740 		break;
   3741 	case WM_T_82571:
   3742 	case WM_T_82575:
   3743 	case WM_T_82576:
   3744 	case WM_T_80003:
   3745 	case WM_T_I210:
   3746 	case WM_T_I211:
   3747 		offset = wm_check_alt_mac_addr(sc);
   3748 		if (offset == 0)
   3749 			if ((sc->sc_funcid & 0x01) == 1)
   3750 				do_invert = 1;
   3751 		break;
   3752 	default:
   3753 		if ((sc->sc_funcid & 0x01) == 1)
   3754 			do_invert = 1;
   3755 		break;
   3756 	}
   3757 
   3758 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3759 		goto bad;
   3760 
   3761 	enaddr[0] = myea[0] & 0xff;
   3762 	enaddr[1] = myea[0] >> 8;
   3763 	enaddr[2] = myea[1] & 0xff;
   3764 	enaddr[3] = myea[1] >> 8;
   3765 	enaddr[4] = myea[2] & 0xff;
   3766 	enaddr[5] = myea[2] >> 8;
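	/*
	 * Editor's note (illustrative example): the NVM stores the address
	 * as three little-endian 16-bit words, so myea[] = { 0x1100,
	 * 0x3322, 0x5544 } unpacks to 00:11:22:33:44:55.
	 */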
   3767 
   3768 	/*
   3769 	 * Toggle the LSB of the MAC address on the second port
   3770 	 * of some dual port cards.
   3771 	 */
   3772 	if (do_invert != 0)
   3773 		enaddr[5] ^= 1;
   3774 
   3775 	return 0;
   3776 
   3777  bad:
   3778 	return -1;
   3779 }
   3780 
   3781 /*
   3782  * wm_set_ral:
   3783  *
   3784  *	Set an entry in the receive address list.
   3785  */
   3786 static void
   3787 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3788 {
   3789 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3790 	uint32_t wlock_mac;
   3791 	int rv;
   3792 
   3793 	if (enaddr != NULL) {
   3794 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3795 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3796 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3797 		ral_hi |= RAL_AV;
   3798 	} else {
   3799 		ral_lo = 0;
   3800 		ral_hi = 0;
   3801 	}
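	/*
	 * Editor's note (illustrative example): RAL holds the first four
	 * address bytes in little-endian order and RAH the last two plus
	 * the Address Valid bit, so 00:11:22:33:44:55 yields
	 * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544.
	 */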
   3802 
   3803 	switch (sc->sc_type) {
   3804 	case WM_T_82542_2_0:
   3805 	case WM_T_82542_2_1:
   3806 	case WM_T_82543:
   3807 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3808 		CSR_WRITE_FLUSH(sc);
   3809 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3810 		CSR_WRITE_FLUSH(sc);
   3811 		break;
   3812 	case WM_T_PCH2:
   3813 	case WM_T_PCH_LPT:
   3814 	case WM_T_PCH_SPT:
   3815 	case WM_T_PCH_CNP:
   3816 		if (idx == 0) {
   3817 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3818 			CSR_WRITE_FLUSH(sc);
   3819 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3820 			CSR_WRITE_FLUSH(sc);
   3821 			return;
   3822 		}
   3823 		if (sc->sc_type != WM_T_PCH2) {
   3824 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3825 			    FWSM_WLOCK_MAC);
   3826 			addrl = WMREG_SHRAL(idx - 1);
   3827 			addrh = WMREG_SHRAH(idx - 1);
   3828 		} else {
   3829 			wlock_mac = 0;
   3830 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3831 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3832 		}
   3833 
   3834 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3835 			rv = wm_get_swflag_ich8lan(sc);
   3836 			if (rv != 0)
   3837 				return;
   3838 			CSR_WRITE(sc, addrl, ral_lo);
   3839 			CSR_WRITE_FLUSH(sc);
   3840 			CSR_WRITE(sc, addrh, ral_hi);
   3841 			CSR_WRITE_FLUSH(sc);
   3842 			wm_put_swflag_ich8lan(sc);
   3843 		}
   3844 
   3845 		break;
   3846 	default:
   3847 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3848 		CSR_WRITE_FLUSH(sc);
   3849 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3850 		CSR_WRITE_FLUSH(sc);
   3851 		break;
   3852 	}
   3853 }
   3854 
   3855 /*
   3856  * wm_mchash:
   3857  *
   3858  *	Compute the hash of the multicast address for the 4096-bit
   3859  *	multicast filter.
   3860  */
   3861 static uint32_t
   3862 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3863 {
   3864 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3865 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3866 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3867 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3868 	uint32_t hash;
   3869 
   3870 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3871 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3872 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3873 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3874 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3875 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3876 		return (hash & 0x3ff);
   3877 	}
   3878 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3879 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3880 
   3881 	return (hash & 0xfff);
   3882 }
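/*
 * Editor's note (worked example, assuming sc_mchash_type == 0 on a
 * non-ICH/PCH chip): for the multicast address 01:00:5e:00:00:01,
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter() sets
 * bit (0x010 & 0x1f) = 16 in MTA[0x010 >> 5] = MTA[0].
 */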
   3883 
   3884 /*
   3885  * wm_rar_count:
   3886  *	Return the number of receive address (RAR) entries for the chip.
   3887  */
   3888 static int
   3889 wm_rar_count(struct wm_softc *sc)
   3890 {
   3891 	int size;
   3892 
   3893 	switch (sc->sc_type) {
   3894 	case WM_T_ICH8:
   3895 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3896 		break;
   3897 	case WM_T_ICH9:
   3898 	case WM_T_ICH10:
   3899 	case WM_T_PCH:
   3900 		size = WM_RAL_TABSIZE_ICH8;
   3901 		break;
   3902 	case WM_T_PCH2:
   3903 		size = WM_RAL_TABSIZE_PCH2;
   3904 		break;
   3905 	case WM_T_PCH_LPT:
   3906 	case WM_T_PCH_SPT:
   3907 	case WM_T_PCH_CNP:
   3908 		size = WM_RAL_TABSIZE_PCH_LPT;
   3909 		break;
   3910 	case WM_T_82575:
   3911 	case WM_T_I210:
   3912 	case WM_T_I211:
   3913 		size = WM_RAL_TABSIZE_82575;
   3914 		break;
   3915 	case WM_T_82576:
   3916 	case WM_T_82580:
   3917 		size = WM_RAL_TABSIZE_82576;
   3918 		break;
   3919 	case WM_T_I350:
   3920 	case WM_T_I354:
   3921 		size = WM_RAL_TABSIZE_I350;
   3922 		break;
   3923 	default:
   3924 		size = WM_RAL_TABSIZE;
   3925 	}
   3926 
   3927 	return size;
   3928 }
   3929 
   3930 /*
   3931  * wm_set_filter:
   3932  *
   3933  *	Set up the receive filter.
   3934  */
   3935 static void
   3936 wm_set_filter(struct wm_softc *sc)
   3937 {
   3938 	struct ethercom *ec = &sc->sc_ethercom;
   3939 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3940 	struct ether_multi *enm;
   3941 	struct ether_multistep step;
   3942 	bus_addr_t mta_reg;
   3943 	uint32_t hash, reg, bit;
   3944 	int i, size, ralmax, rv;
   3945 
   3946 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3947 		device_xname(sc->sc_dev), __func__));
   3948 
   3949 	if (sc->sc_type >= WM_T_82544)
   3950 		mta_reg = WMREG_CORDOVA_MTA;
   3951 	else
   3952 		mta_reg = WMREG_MTA;
   3953 
   3954 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3955 
   3956 	if (ifp->if_flags & IFF_BROADCAST)
   3957 		sc->sc_rctl |= RCTL_BAM;
   3958 	if (ifp->if_flags & IFF_PROMISC) {
   3959 		sc->sc_rctl |= RCTL_UPE;
   3960 		ETHER_LOCK(ec);
   3961 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3962 		ETHER_UNLOCK(ec);
   3963 		goto allmulti;
   3964 	}
   3965 
   3966 	/*
   3967 	 * Set the station address in the first RAL slot, and
   3968 	 * clear the remaining slots.
   3969 	 */
   3970 	size = wm_rar_count(sc);
   3971 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3972 
   3973 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3974 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3975 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3976 		switch (i) {
   3977 		case 0:
   3978 			/* We can use all entries */
   3979 			ralmax = size;
   3980 			break;
   3981 		case 1:
   3982 			/* Only RAR[0] */
   3983 			ralmax = 1;
   3984 			break;
   3985 		default:
   3986 			/* Available SHRA + RAR[0] */
   3987 			ralmax = i + 1;
   3988 		}
   3989 	} else
   3990 		ralmax = size;
   3991 	for (i = 1; i < size; i++) {
   3992 		if (i < ralmax)
   3993 			wm_set_ral(sc, NULL, i);
   3994 	}
   3995 
   3996 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3997 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3998 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3999 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4000 		size = WM_ICH8_MC_TABSIZE;
   4001 	else
   4002 		size = WM_MC_TABSIZE;
   4003 	/* Clear out the multicast table. */
   4004 	for (i = 0; i < size; i++) {
   4005 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4006 		CSR_WRITE_FLUSH(sc);
   4007 	}
   4008 
   4009 	ETHER_LOCK(ec);
   4010 	ETHER_FIRST_MULTI(step, ec, enm);
   4011 	while (enm != NULL) {
   4012 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4013 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4014 			ETHER_UNLOCK(ec);
   4015 			/*
   4016 			 * We must listen to a range of multicast addresses.
   4017 			 * For now, just accept all multicasts, rather than
   4018 			 * trying to set only those filter bits needed to match
   4019 			 * the range.  (At this time, the only use of address
   4020 			 * ranges is for IP multicast routing, for which the
   4021 			 * range is big enough to require all bits set.)
   4022 			 */
   4023 			goto allmulti;
   4024 		}
   4025 
   4026 		hash = wm_mchash(sc, enm->enm_addrlo);
   4027 
   4028 		reg = (hash >> 5);
   4029 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4030 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4031 		    || (sc->sc_type == WM_T_PCH2)
   4032 		    || (sc->sc_type == WM_T_PCH_LPT)
   4033 		    || (sc->sc_type == WM_T_PCH_SPT)
   4034 		    || (sc->sc_type == WM_T_PCH_CNP))
   4035 			reg &= 0x1f;
   4036 		else
   4037 			reg &= 0x7f;
   4038 		bit = hash & 0x1f;
   4039 
   4040 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4041 		hash |= 1U << bit;
   4042 
   4043 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4044 			/*
   4045 			 * 82544 Errata 9: Certain register cannot be written
   4046 			 * with particular alignments in PCI-X bus operation
   4047 			 * (FCAH, MTA and VFTA).
   4048 			 */
   4049 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4050 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4051 			CSR_WRITE_FLUSH(sc);
   4052 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4053 			CSR_WRITE_FLUSH(sc);
   4054 		} else {
   4055 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4056 			CSR_WRITE_FLUSH(sc);
   4057 		}
   4058 
   4059 		ETHER_NEXT_MULTI(step, enm);
   4060 	}
   4061 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4062 	ETHER_UNLOCK(ec);
   4063 
   4064 	goto setit;
   4065 
   4066  allmulti:
   4067 	sc->sc_rctl |= RCTL_MPE;
   4068 
   4069  setit:
   4070 	if (sc->sc_type >= WM_T_PCH2) {
   4071 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4072 		    && (ifp->if_mtu > ETHERMTU))
   4073 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4074 		else
   4075 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4076 		if (rv != 0)
   4077 			device_printf(sc->sc_dev,
   4078 			    "Failed to do workaround for jumbo frame.\n");
   4079 	}
   4080 
   4081 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4082 }
   4083 
   4084 /* Reset and init related */
   4085 
   4086 static void
   4087 wm_set_vlan(struct wm_softc *sc)
   4088 {
   4089 
   4090 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4091 		device_xname(sc->sc_dev), __func__));
   4092 
   4093 	/* Deal with VLAN enables. */
   4094 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4095 		sc->sc_ctrl |= CTRL_VME;
   4096 	else
   4097 		sc->sc_ctrl &= ~CTRL_VME;
   4098 
   4099 	/* Write the control registers. */
   4100 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4101 }
   4102 
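/*
 * Editor's note (summary, not in the original source): set a non-zero
 * PCIe completion timeout, via GCR for pre-v2 capability devices or
 * via the PCIe DCSR2 config register otherwise, and disable completion
 * timeout resend.
 */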
   4103 static void
   4104 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4105 {
   4106 	uint32_t gcr;
   4107 	pcireg_t ctrl2;
   4108 
   4109 	gcr = CSR_READ(sc, WMREG_GCR);
   4110 
   4111 	/* Only take action if timeout value is defaulted to 0 */
   4112 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4113 		goto out;
   4114 
   4115 	if ((gcr & GCR_CAP_VER2) == 0) {
   4116 		gcr |= GCR_CMPL_TMOUT_10MS;
   4117 		goto out;
   4118 	}
   4119 
   4120 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4121 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4122 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4123 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4124 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4125 
   4126 out:
   4127 	/* Disable completion timeout resend */
   4128 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4129 
   4130 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4131 }
   4132 
   4133 void
   4134 wm_get_auto_rd_done(struct wm_softc *sc)
   4135 {
   4136 	int i;
   4137 
   4138 	/* Wait for EEPROM to reload */
   4139 	switch (sc->sc_type) {
   4140 	case WM_T_82571:
   4141 	case WM_T_82572:
   4142 	case WM_T_82573:
   4143 	case WM_T_82574:
   4144 	case WM_T_82583:
   4145 	case WM_T_82575:
   4146 	case WM_T_82576:
   4147 	case WM_T_82580:
   4148 	case WM_T_I350:
   4149 	case WM_T_I354:
   4150 	case WM_T_I210:
   4151 	case WM_T_I211:
   4152 	case WM_T_80003:
   4153 	case WM_T_ICH8:
   4154 	case WM_T_ICH9:
   4155 		for (i = 0; i < 10; i++) {
   4156 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4157 				break;
   4158 			delay(1000);
   4159 		}
   4160 		if (i == 10) {
   4161 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4162 			    "complete\n", device_xname(sc->sc_dev));
   4163 		}
   4164 		break;
   4165 	default:
   4166 		break;
   4167 	}
   4168 }
   4169 
   4170 void
   4171 wm_lan_init_done(struct wm_softc *sc)
   4172 {
   4173 	uint32_t reg = 0;
   4174 	int i;
   4175 
   4176 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4177 		device_xname(sc->sc_dev), __func__));
   4178 
   4179 	/* Wait for eeprom to reload */
   4180 	switch (sc->sc_type) {
   4181 	case WM_T_ICH10:
   4182 	case WM_T_PCH:
   4183 	case WM_T_PCH2:
   4184 	case WM_T_PCH_LPT:
   4185 	case WM_T_PCH_SPT:
   4186 	case WM_T_PCH_CNP:
   4187 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4188 			reg = CSR_READ(sc, WMREG_STATUS);
   4189 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4190 				break;
   4191 			delay(100);
   4192 		}
   4193 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4194 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4195 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4196 		}
   4197 		break;
   4198 	default:
   4199 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4200 		    __func__);
   4201 		break;
   4202 	}
   4203 
   4204 	reg &= ~STATUS_LAN_INIT_DONE;
   4205 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4206 }
   4207 
   4208 void
   4209 wm_get_cfg_done(struct wm_softc *sc)
   4210 {
   4211 	int mask;
   4212 	uint32_t reg;
   4213 	int i;
   4214 
   4215 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4216 		device_xname(sc->sc_dev), __func__));
   4217 
   4218 	/* Wait for eeprom to reload */
   4219 	switch (sc->sc_type) {
   4220 	case WM_T_82542_2_0:
   4221 	case WM_T_82542_2_1:
   4222 		/* null */
   4223 		break;
   4224 	case WM_T_82543:
   4225 	case WM_T_82544:
   4226 	case WM_T_82540:
   4227 	case WM_T_82545:
   4228 	case WM_T_82545_3:
   4229 	case WM_T_82546:
   4230 	case WM_T_82546_3:
   4231 	case WM_T_82541:
   4232 	case WM_T_82541_2:
   4233 	case WM_T_82547:
   4234 	case WM_T_82547_2:
   4235 	case WM_T_82573:
   4236 	case WM_T_82574:
   4237 	case WM_T_82583:
   4238 		/* generic */
   4239 		delay(10*1000);
   4240 		break;
   4241 	case WM_T_80003:
   4242 	case WM_T_82571:
   4243 	case WM_T_82572:
   4244 	case WM_T_82575:
   4245 	case WM_T_82576:
   4246 	case WM_T_82580:
   4247 	case WM_T_I350:
   4248 	case WM_T_I354:
   4249 	case WM_T_I210:
   4250 	case WM_T_I211:
   4251 		if (sc->sc_type == WM_T_82571) {
   4252 			/* Only 82571 shares port 0 */
   4253 			mask = EEMNGCTL_CFGDONE_0;
   4254 		} else
   4255 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4256 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4257 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4258 				break;
   4259 			delay(1000);
   4260 		}
   4261 		if (i >= WM_PHY_CFG_TIMEOUT)
   4262 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4263 				device_xname(sc->sc_dev), __func__));
   4264 		break;
   4265 	case WM_T_ICH8:
   4266 	case WM_T_ICH9:
   4267 	case WM_T_ICH10:
   4268 	case WM_T_PCH:
   4269 	case WM_T_PCH2:
   4270 	case WM_T_PCH_LPT:
   4271 	case WM_T_PCH_SPT:
   4272 	case WM_T_PCH_CNP:
   4273 		delay(10*1000);
   4274 		if (sc->sc_type >= WM_T_ICH10)
   4275 			wm_lan_init_done(sc);
   4276 		else
   4277 			wm_get_auto_rd_done(sc);
   4278 
   4279 		/* Clear PHY Reset Asserted bit */
   4280 		reg = CSR_READ(sc, WMREG_STATUS);
   4281 		if ((reg & STATUS_PHYRA) != 0)
   4282 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4283 		break;
   4284 	default:
   4285 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4286 		    __func__);
   4287 		break;
   4288 	}
   4289 }
   4290 
   4291 int
   4292 wm_phy_post_reset(struct wm_softc *sc)
   4293 {
   4294 	device_t dev = sc->sc_dev;
   4295 	uint16_t reg;
   4296 	int rv = 0;
   4297 
   4298 	/* This function is only for ICH8 and newer. */
   4299 	if (sc->sc_type < WM_T_ICH8)
   4300 		return 0;
   4301 
   4302 	if (wm_phy_resetisblocked(sc)) {
   4303 		/* XXX */
   4304 		device_printf(dev, "PHY is blocked\n");
   4305 		return -1;
   4306 	}
   4307 
   4308 	/* Allow time for h/w to get to quiescent state after reset */
   4309 	delay(10*1000);
   4310 
   4311 	/* Perform any necessary post-reset workarounds */
   4312 	if (sc->sc_type == WM_T_PCH)
   4313 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4314 	else if (sc->sc_type == WM_T_PCH2)
   4315 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4316 	if (rv != 0)
   4317 		return rv;
   4318 
   4319 	/* Clear the host wakeup bit after lcd reset */
   4320 	if (sc->sc_type >= WM_T_PCH) {
   4321 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4322 		reg &= ~BM_WUC_HOST_WU_BIT;
   4323 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4324 	}
   4325 
   4326 	/* Configure the LCD with the extended configuration region in NVM */
   4327 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4328 		return rv;
   4329 
   4330 	/* Configure the LCD with the OEM bits in NVM */
   4331 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4332 
   4333 	if (sc->sc_type == WM_T_PCH2) {
   4334 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4335 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4336 			delay(10 * 1000);
   4337 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4338 		}
   4339 		/* Set EEE LPI Update Timer to 200usec */
   4340 		rv = sc->phy.acquire(sc);
   4341 		if (rv)
   4342 			return rv;
   4343 		rv = wm_write_emi_reg_locked(dev,
   4344 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4345 		sc->phy.release(sc);
   4346 	}
   4347 
   4348 	return rv;
   4349 }
   4350 
   4351 /* Only for PCH and newer */
   4352 static int
   4353 wm_write_smbus_addr(struct wm_softc *sc)
   4354 {
   4355 	uint32_t strap, freq;
   4356 	uint16_t phy_data;
   4357 	int rv;
   4358 
   4359 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4360 		device_xname(sc->sc_dev), __func__));
   4361 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4362 
   4363 	strap = CSR_READ(sc, WMREG_STRAP);
   4364 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4365 
   4366 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4367 	if (rv != 0)
   4368 		return -1;
   4369 
   4370 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4371 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4372 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4373 
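	/*
	 * Editor's note: "freq" below is the STRAP_FREQ field read above;
	 * zero means the SMBus frequency is unsupported, otherwise
	 * (freq - 1) encodes HV_SMB_ADDR_FREQ_LOW in bit 0 and
	 * HV_SMB_ADDR_FREQ_HIGH in bit 1.
	 */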
   4374 	if (sc->sc_phytype == WMPHY_I217) {
   4375 		/* Restore SMBus frequency */
   4376 		if (freq--) {
   4377 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4378 			    | HV_SMB_ADDR_FREQ_HIGH);
   4379 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4380 			    HV_SMB_ADDR_FREQ_LOW);
   4381 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4382 			    HV_SMB_ADDR_FREQ_HIGH);
   4383 		} else
   4384 			DPRINTF(sc, WM_DEBUG_INIT,
   4385 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4386 				device_xname(sc->sc_dev), __func__));
   4387 	}
   4388 
   4389 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4390 	    phy_data);
   4391 }
   4392 
   4393 static int
   4394 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4395 {
   4396 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4397 	uint16_t phy_page = 0;
   4398 	int rv = 0;
   4399 
   4400 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4401 		device_xname(sc->sc_dev), __func__));
   4402 
   4403 	switch (sc->sc_type) {
   4404 	case WM_T_ICH8:
   4405 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4406 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4407 			return 0;
   4408 
   4409 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4410 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4411 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4412 			break;
   4413 		}
   4414 		/* FALLTHROUGH */
   4415 	case WM_T_PCH:
   4416 	case WM_T_PCH2:
   4417 	case WM_T_PCH_LPT:
   4418 	case WM_T_PCH_SPT:
   4419 	case WM_T_PCH_CNP:
   4420 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4421 		break;
   4422 	default:
   4423 		return 0;
   4424 	}
   4425 
   4426 	if ((rv = sc->phy.acquire(sc)) != 0)
   4427 		return rv;
   4428 
   4429 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4430 	if ((reg & sw_cfg_mask) == 0)
   4431 		goto release;
   4432 
   4433 	/*
   4434 	 * Make sure HW does not configure LCD from PHY extended configuration
   4435 	 * before SW configuration
   4436 	 */
   4437 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4438 	if ((sc->sc_type < WM_T_PCH2)
   4439 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4440 		goto release;
   4441 
   4442 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4443 		device_xname(sc->sc_dev), __func__));
   4444 	/* word_addr is in DWORD */
   4445 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4446 
   4447 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4448 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4449 	if (cnf_size == 0)
   4450 		goto release;
   4451 
   4452 	if (((sc->sc_type == WM_T_PCH)
   4453 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4454 	    || (sc->sc_type > WM_T_PCH)) {
   4455 		/*
   4456 		 * HW configures the SMBus address and LEDs when the OEM and
   4457 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4458 		 * are cleared, SW will configure them instead.
   4459 		 */
   4460 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4461 			device_xname(sc->sc_dev), __func__));
   4462 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4463 			goto release;
   4464 
   4465 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4466 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4467 		    (uint16_t)reg);
   4468 		if (rv != 0)
   4469 			goto release;
   4470 	}
   4471 
   4472 	/* Configure LCD from extended configuration region. */
   4473 	for (i = 0; i < cnf_size; i++) {
   4474 		uint16_t reg_data, reg_addr;
   4475 
   4476 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4477 			goto release;
   4478 
    4479 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4480 			goto release;
   4481 
   4482 		if (reg_addr == IGPHY_PAGE_SELECT)
   4483 			phy_page = reg_data;
   4484 
   4485 		reg_addr &= IGPHY_MAXREGADDR;
   4486 		reg_addr |= phy_page;
   4487 
   4488 		KASSERT(sc->phy.writereg_locked != NULL);
   4489 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4490 		    reg_data);
   4491 	}
   4492 
   4493 release:
   4494 	sc->phy.release(sc);
   4495 	return rv;
   4496 }
   4497 
   4498 /*
   4499  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4500  *  @sc:       pointer to the HW structure
   4501  *  @d0_state: boolean if entering d0 or d3 device state
   4502  *
   4503  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4504  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4505  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4506  */
   4507 int
   4508 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4509 {
   4510 	uint32_t mac_reg;
   4511 	uint16_t oem_reg;
   4512 	int rv;
   4513 
   4514 	if (sc->sc_type < WM_T_PCH)
   4515 		return 0;
   4516 
   4517 	rv = sc->phy.acquire(sc);
   4518 	if (rv != 0)
   4519 		return rv;
   4520 
   4521 	if (sc->sc_type == WM_T_PCH) {
   4522 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4523 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4524 			goto release;
   4525 	}
   4526 
   4527 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4528 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4529 		goto release;
   4530 
   4531 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4532 
   4533 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4534 	if (rv != 0)
   4535 		goto release;
   4536 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4537 
   4538 	if (d0_state) {
   4539 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4540 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4541 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4542 			oem_reg |= HV_OEM_BITS_LPLU;
   4543 	} else {
   4544 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4545 		    != 0)
   4546 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4547 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4548 		    != 0)
   4549 			oem_reg |= HV_OEM_BITS_LPLU;
   4550 	}
   4551 
   4552 	/* Set Restart auto-neg to activate the bits */
   4553 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4554 	    && (wm_phy_resetisblocked(sc) == false))
   4555 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4556 
   4557 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4558 
   4559 release:
   4560 	sc->phy.release(sc);
   4561 
   4562 	return rv;
   4563 }
   4564 
   4565 /* Init hardware bits */
   4566 void
   4567 wm_initialize_hardware_bits(struct wm_softc *sc)
   4568 {
   4569 	uint32_t tarc0, tarc1, reg;
   4570 
   4571 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4572 		device_xname(sc->sc_dev), __func__));
   4573 
   4574 	/* For 82571 variant, 80003 and ICHs */
   4575 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4576 	    || (sc->sc_type >= WM_T_80003)) {
   4577 
   4578 		/* Transmit Descriptor Control 0 */
   4579 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4580 		reg |= TXDCTL_COUNT_DESC;
   4581 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4582 
   4583 		/* Transmit Descriptor Control 1 */
   4584 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4585 		reg |= TXDCTL_COUNT_DESC;
   4586 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4587 
   4588 		/* TARC0 */
   4589 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4590 		switch (sc->sc_type) {
   4591 		case WM_T_82571:
   4592 		case WM_T_82572:
   4593 		case WM_T_82573:
   4594 		case WM_T_82574:
   4595 		case WM_T_82583:
   4596 		case WM_T_80003:
   4597 			/* Clear bits 30..27 */
   4598 			tarc0 &= ~__BITS(30, 27);
   4599 			break;
   4600 		default:
   4601 			break;
   4602 		}
   4603 
   4604 		switch (sc->sc_type) {
   4605 		case WM_T_82571:
   4606 		case WM_T_82572:
   4607 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4608 
   4609 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4610 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4611 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4612 			/* 8257[12] Errata No.7 */
    4613 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4614 
   4615 			/* TARC1 bit 28 */
   4616 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4617 				tarc1 &= ~__BIT(28);
   4618 			else
   4619 				tarc1 |= __BIT(28);
   4620 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4621 
   4622 			/*
   4623 			 * 8257[12] Errata No.13
    4624 			 * Disable Dynamic Clock Gating.
   4625 			 */
   4626 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4627 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4628 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4629 			break;
   4630 		case WM_T_82573:
   4631 		case WM_T_82574:
   4632 		case WM_T_82583:
   4633 			if ((sc->sc_type == WM_T_82574)
   4634 			    || (sc->sc_type == WM_T_82583))
   4635 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4636 
   4637 			/* Extended Device Control */
   4638 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4639 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4640 			reg |= __BIT(22);	/* Set bit 22 */
   4641 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4642 
   4643 			/* Device Control */
   4644 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4645 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4646 
   4647 			/* PCIe Control Register */
   4648 			/*
   4649 			 * 82573 Errata (unknown).
   4650 			 *
   4651 			 * 82574 Errata 25 and 82583 Errata 12
   4652 			 * "Dropped Rx Packets":
    4653 			 *   NVM image 2.1.4 and newer does not have this bug.
   4654 			 */
   4655 			reg = CSR_READ(sc, WMREG_GCR);
   4656 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4657 			CSR_WRITE(sc, WMREG_GCR, reg);
   4658 
   4659 			if ((sc->sc_type == WM_T_82574)
   4660 			    || (sc->sc_type == WM_T_82583)) {
   4661 				/*
   4662 				 * Document says this bit must be set for
   4663 				 * proper operation.
   4664 				 */
   4665 				reg = CSR_READ(sc, WMREG_GCR);
   4666 				reg |= __BIT(22);
   4667 				CSR_WRITE(sc, WMREG_GCR, reg);
   4668 
   4669 				/*
    4670 				 * Apply a workaround for a hardware erratum
    4671 				 * documented in the errata docs. It fixes an
    4672 				 * issue where some error-prone or unreliable
    4673 				 * PCIe completions occur, particularly with
    4674 				 * ASPM enabled. Without the fix, the issue
    4675 				 * can cause Tx timeouts.
   4676 				 */
   4677 				reg = CSR_READ(sc, WMREG_GCR2);
   4678 				reg |= __BIT(0);
   4679 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4680 			}
   4681 			break;
   4682 		case WM_T_80003:
   4683 			/* TARC0 */
   4684 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4685 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4686 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4687 
   4688 			/* TARC1 bit 28 */
   4689 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4690 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4691 				tarc1 &= ~__BIT(28);
   4692 			else
   4693 				tarc1 |= __BIT(28);
   4694 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4695 			break;
   4696 		case WM_T_ICH8:
   4697 		case WM_T_ICH9:
   4698 		case WM_T_ICH10:
   4699 		case WM_T_PCH:
   4700 		case WM_T_PCH2:
   4701 		case WM_T_PCH_LPT:
   4702 		case WM_T_PCH_SPT:
   4703 		case WM_T_PCH_CNP:
   4704 			/* TARC0 */
   4705 			if (sc->sc_type == WM_T_ICH8) {
   4706 				/* Set TARC0 bits 29 and 28 */
   4707 				tarc0 |= __BITS(29, 28);
   4708 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4709 				tarc0 |= __BIT(29);
   4710 				/*
    4711 				 * Drop bit 28. From Linux.
   4712 				 * See I218/I219 spec update
   4713 				 * "5. Buffer Overrun While the I219 is
   4714 				 * Processing DMA Transactions"
   4715 				 */
   4716 				tarc0 &= ~__BIT(28);
   4717 			}
   4718 			/* Set TARC0 bits 23,24,26,27 */
   4719 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4720 
   4721 			/* CTRL_EXT */
   4722 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4723 			reg |= __BIT(22);	/* Set bit 22 */
   4724 			/*
   4725 			 * Enable PHY low-power state when MAC is at D3
   4726 			 * w/o WoL
   4727 			 */
   4728 			if (sc->sc_type >= WM_T_PCH)
   4729 				reg |= CTRL_EXT_PHYPDEN;
   4730 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4731 
   4732 			/* TARC1 */
   4733 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4734 			/* bit 28 */
   4735 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4736 				tarc1 &= ~__BIT(28);
   4737 			else
   4738 				tarc1 |= __BIT(28);
   4739 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4740 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4741 
   4742 			/* Device Status */
   4743 			if (sc->sc_type == WM_T_ICH8) {
   4744 				reg = CSR_READ(sc, WMREG_STATUS);
   4745 				reg &= ~__BIT(31);
   4746 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4747 
   4748 			}
   4749 
   4750 			/* IOSFPC */
   4751 			if (sc->sc_type == WM_T_PCH_SPT) {
   4752 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4753 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4754 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4755 			}
   4756 			/*
    4757 			 * To work around a descriptor data corruption issue
    4758 			 * during NFS v2 UDP traffic, just disable the NFS
    4759 			 * filtering capability.
   4760 			 */
   4761 			reg = CSR_READ(sc, WMREG_RFCTL);
   4762 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4763 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4764 			break;
   4765 		default:
   4766 			break;
   4767 		}
   4768 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4769 
   4770 		switch (sc->sc_type) {
   4771 		/*
   4772 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4773 		 * Avoid RSS Hash Value bug.
   4774 		 */
   4775 		case WM_T_82571:
   4776 		case WM_T_82572:
   4777 		case WM_T_82573:
   4778 		case WM_T_80003:
   4779 		case WM_T_ICH8:
   4780 			reg = CSR_READ(sc, WMREG_RFCTL);
   4781 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4782 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4783 			break;
   4784 		case WM_T_82574:
    4785 			/* Use extended Rx descriptors. */
   4786 			reg = CSR_READ(sc, WMREG_RFCTL);
   4787 			reg |= WMREG_RFCTL_EXSTEN;
   4788 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4789 			break;
   4790 		default:
   4791 			break;
   4792 		}
   4793 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4794 		/*
   4795 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4796 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4797 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4798 		 * Correctly by the Device"
   4799 		 *
   4800 		 * I354(C2000) Errata AVR53:
   4801 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4802 		 * Hang"
   4803 		 */
   4804 		reg = CSR_READ(sc, WMREG_RFCTL);
   4805 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4806 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4807 	}
   4808 }
   4809 
   4810 static uint32_t
   4811 wm_rxpbs_adjust_82580(uint32_t val)
   4812 {
   4813 	uint32_t rv = 0;
   4814 
   4815 	if (val < __arraycount(wm_82580_rxpbs_table))
   4816 		rv = wm_82580_rxpbs_table[val];
   4817 
   4818 	return rv;
   4819 }
   4820 
   4821 /*
   4822  * wm_reset_phy:
   4823  *
   4824  *	generic PHY reset function.
   4825  *	Same as e1000_phy_hw_reset_generic()
   4826  */
   4827 static int
   4828 wm_reset_phy(struct wm_softc *sc)
   4829 {
   4830 	uint32_t reg;
   4831 
   4832 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4833 		device_xname(sc->sc_dev), __func__));
   4834 	if (wm_phy_resetisblocked(sc))
   4835 		return -1;
   4836 
   4837 	sc->phy.acquire(sc);
   4838 
   4839 	reg = CSR_READ(sc, WMREG_CTRL);
   4840 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4841 	CSR_WRITE_FLUSH(sc);
   4842 
   4843 	delay(sc->phy.reset_delay_us);
   4844 
   4845 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4846 	CSR_WRITE_FLUSH(sc);
   4847 
   4848 	delay(150);
   4849 
   4850 	sc->phy.release(sc);
   4851 
   4852 	wm_get_cfg_done(sc);
   4853 	wm_phy_post_reset(sc);
   4854 
   4855 	return 0;
   4856 }
   4857 
   4858 /*
   4859  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   4860  *
    4861  * In I219, the descriptor rings must be emptied before resetting the HW
   4862  * or before changing the device state to D3 during runtime (runtime PM).
   4863  *
   4864  * Failure to do this will cause the HW to enter a unit hang state which can
   4865  * only be released by PCI reset on the device.
   4866  *
   4867  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   4868  */
   4869 static void
   4870 wm_flush_desc_rings(struct wm_softc *sc)
   4871 {
   4872 	pcireg_t preg;
   4873 	uint32_t reg;
   4874 	struct wm_txqueue *txq;
   4875 	wiseman_txdesc_t *txd;
   4876 	int nexttx;
   4877 	uint32_t rctl;
   4878 
   4879 	/* First, disable MULR fix in FEXTNVM11 */
   4880 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4881 	reg |= FEXTNVM11_DIS_MULRFIX;
   4882 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4883 
   4884 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4885 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4886 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4887 		return;
   4888 
   4889 	/*
   4890 	 * Remove all descriptors from the tx_ring.
   4891 	 *
   4892 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    4893 	 * happens when the HW reads the regs. We assign the ring itself as
    4894 	 * the data of the next descriptor. We don't care about the data; we
    4895 	 * are about to reset the HW.
   4896 	 */
   4897 #ifdef WM_DEBUG
   4898 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   4899 #endif
   4900 	reg = CSR_READ(sc, WMREG_TCTL);
   4901 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4902 
   4903 	txq = &sc->sc_queue[0].wmq_txq;
   4904 	nexttx = txq->txq_next;
   4905 	txd = &txq->txq_descs[nexttx];
   4906 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   4907 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4908 	txd->wtx_fields.wtxu_status = 0;
   4909 	txd->wtx_fields.wtxu_options = 0;
   4910 	txd->wtx_fields.wtxu_vlan = 0;
   4911 
   4912 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4913 	    BUS_SPACE_BARRIER_WRITE);
   4914 
   4915 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4916 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4917 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4918 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4919 	delay(250);
   4920 
   4921 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4922 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4923 		return;
   4924 
   4925 	/*
   4926 	 * Mark all descriptors in the RX ring as consumed and disable the
   4927 	 * rx ring.
   4928 	 */
   4929 #ifdef WM_DEBUG
   4930 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4931 #endif
   4932 	rctl = CSR_READ(sc, WMREG_RCTL);
   4933 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4934 	CSR_WRITE_FLUSH(sc);
   4935 	delay(150);
   4936 
   4937 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4938 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4939 	reg &= 0xffffc000;
   4940 	/*
   4941 	 * Update thresholds: prefetch threshold to 31, host threshold
   4942 	 * to 1 and make sure the granularity is "descriptors" and not
   4943 	 * "cache lines"
   4944 	 */
   4945 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4946 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4947 
   4948 	/* Momentarily enable the RX ring for the changes to take effect */
   4949 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4950 	CSR_WRITE_FLUSH(sc);
   4951 	delay(150);
   4952 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4953 }
   4954 
   4955 /*
   4956  * wm_reset:
   4957  *
   4958  *	Reset the i82542 chip.
   4959  */
   4960 static void
   4961 wm_reset(struct wm_softc *sc)
   4962 {
   4963 	int phy_reset = 0;
   4964 	int i, error = 0;
   4965 	uint32_t reg;
   4966 	uint16_t kmreg;
   4967 	int rv;
   4968 
   4969 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4970 		device_xname(sc->sc_dev), __func__));
   4971 	KASSERT(sc->sc_type != 0);
   4972 
   4973 	/*
   4974 	 * Allocate on-chip memory according to the MTU size.
   4975 	 * The Packet Buffer Allocation register must be written
   4976 	 * before the chip is reset.
   4977 	 */
   4978 	switch (sc->sc_type) {
   4979 	case WM_T_82547:
   4980 	case WM_T_82547_2:
   4981 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4982 		    PBA_22K : PBA_30K;
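         		/*
         		 * Illustrative arithmetic: a standard MTU selects PBA_30K
         		 * for receive, so the TX FIFO carved out below gets the
         		 * remaining 40K - 30K = 10K of on-chip packet buffer.
         		 */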
   4983 		for (i = 0; i < sc->sc_nqueues; i++) {
   4984 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4985 			txq->txq_fifo_head = 0;
   4986 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4987 			txq->txq_fifo_size =
   4988 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4989 			txq->txq_fifo_stall = 0;
   4990 		}
   4991 		break;
   4992 	case WM_T_82571:
   4993 	case WM_T_82572:
    4994 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4995 	case WM_T_80003:
   4996 		sc->sc_pba = PBA_32K;
   4997 		break;
   4998 	case WM_T_82573:
   4999 		sc->sc_pba = PBA_12K;
   5000 		break;
   5001 	case WM_T_82574:
   5002 	case WM_T_82583:
   5003 		sc->sc_pba = PBA_20K;
   5004 		break;
   5005 	case WM_T_82576:
   5006 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5007 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5008 		break;
   5009 	case WM_T_82580:
   5010 	case WM_T_I350:
   5011 	case WM_T_I354:
   5012 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5013 		break;
   5014 	case WM_T_I210:
   5015 	case WM_T_I211:
   5016 		sc->sc_pba = PBA_34K;
   5017 		break;
   5018 	case WM_T_ICH8:
   5019 		/* Workaround for a bit corruption issue in FIFO memory */
   5020 		sc->sc_pba = PBA_8K;
   5021 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5022 		break;
   5023 	case WM_T_ICH9:
   5024 	case WM_T_ICH10:
   5025 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5026 		    PBA_14K : PBA_10K;
   5027 		break;
   5028 	case WM_T_PCH:
   5029 	case WM_T_PCH2:	/* XXX 14K? */
   5030 	case WM_T_PCH_LPT:
   5031 	case WM_T_PCH_SPT:
   5032 	case WM_T_PCH_CNP:
   5033 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5034 		    PBA_12K : PBA_26K;
   5035 		break;
   5036 	default:
   5037 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5038 		    PBA_40K : PBA_48K;
   5039 		break;
   5040 	}
   5041 	/*
   5042 	 * Only old or non-multiqueue devices have the PBA register
   5043 	 * XXX Need special handling for 82575.
   5044 	 */
   5045 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5046 	    || (sc->sc_type == WM_T_82575))
   5047 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5048 
   5049 	/* Prevent the PCI-E bus from sticking */
   5050 	if (sc->sc_flags & WM_F_PCIE) {
   5051 		int timeout = 800;
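         		/* Poll below for up to 800 * 100us = 80ms. */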
   5052 
   5053 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5054 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5055 
   5056 		while (timeout--) {
   5057 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5058 			    == 0)
   5059 				break;
   5060 			delay(100);
   5061 		}
   5062 		if (timeout == 0)
   5063 			device_printf(sc->sc_dev,
   5064 			    "failed to disable busmastering\n");
   5065 	}
   5066 
   5067 	/* Set the completion timeout for interface */
   5068 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5069 	    || (sc->sc_type == WM_T_82580)
   5070 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5071 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5072 		wm_set_pcie_completion_timeout(sc);
   5073 
   5074 	/* Clear interrupt */
   5075 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5076 	if (wm_is_using_msix(sc)) {
   5077 		if (sc->sc_type != WM_T_82574) {
   5078 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5079 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5080 		} else
   5081 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5082 	}
   5083 
   5084 	/* Stop the transmit and receive processes. */
   5085 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5086 	sc->sc_rctl &= ~RCTL_EN;
   5087 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5088 	CSR_WRITE_FLUSH(sc);
   5089 
   5090 	/* XXX set_tbi_sbp_82543() */
   5091 
   5092 	delay(10*1000);
   5093 
   5094 	/* Must acquire the MDIO ownership before MAC reset */
   5095 	switch (sc->sc_type) {
   5096 	case WM_T_82573:
   5097 	case WM_T_82574:
   5098 	case WM_T_82583:
   5099 		error = wm_get_hw_semaphore_82573(sc);
   5100 		break;
   5101 	default:
   5102 		break;
   5103 	}
   5104 
   5105 	/*
   5106 	 * 82541 Errata 29? & 82547 Errata 28?
   5107 	 * See also the description about PHY_RST bit in CTRL register
   5108 	 * in 8254x_GBe_SDM.pdf.
   5109 	 */
   5110 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5111 		CSR_WRITE(sc, WMREG_CTRL,
   5112 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5113 		CSR_WRITE_FLUSH(sc);
   5114 		delay(5000);
   5115 	}
   5116 
   5117 	switch (sc->sc_type) {
   5118 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5119 	case WM_T_82541:
   5120 	case WM_T_82541_2:
   5121 	case WM_T_82547:
   5122 	case WM_T_82547_2:
   5123 		/*
   5124 		 * On some chipsets, a reset through a memory-mapped write
   5125 		 * cycle can cause the chip to reset before completing the
    5126 		 * write cycle. This causes a major headache that can be avoided
   5127 		 * by issuing the reset via indirect register writes through
   5128 		 * I/O space.
   5129 		 *
   5130 		 * So, if we successfully mapped the I/O BAR at attach time,
   5131 		 * use that. Otherwise, try our luck with a memory-mapped
   5132 		 * reset.
   5133 		 */
   5134 		if (sc->sc_flags & WM_F_IOH_VALID)
   5135 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5136 		else
   5137 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5138 		break;
   5139 	case WM_T_82545_3:
   5140 	case WM_T_82546_3:
   5141 		/* Use the shadow control register on these chips. */
   5142 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5143 		break;
   5144 	case WM_T_80003:
   5145 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5146 		sc->phy.acquire(sc);
   5147 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5148 		sc->phy.release(sc);
   5149 		break;
   5150 	case WM_T_ICH8:
   5151 	case WM_T_ICH9:
   5152 	case WM_T_ICH10:
   5153 	case WM_T_PCH:
   5154 	case WM_T_PCH2:
   5155 	case WM_T_PCH_LPT:
   5156 	case WM_T_PCH_SPT:
   5157 	case WM_T_PCH_CNP:
   5158 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5159 		if (wm_phy_resetisblocked(sc) == false) {
   5160 			/*
   5161 			 * Gate automatic PHY configuration by hardware on
   5162 			 * non-managed 82579
   5163 			 */
   5164 			if ((sc->sc_type == WM_T_PCH2)
   5165 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5166 				== 0))
   5167 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5168 
   5169 			reg |= CTRL_PHY_RESET;
   5170 			phy_reset = 1;
   5171 		} else
   5172 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5173 		sc->phy.acquire(sc);
   5174 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5175 		/* Don't insert a completion barrier when resetting */
   5176 		delay(20*1000);
   5177 		mutex_exit(sc->sc_ich_phymtx);
   5178 		break;
   5179 	case WM_T_82580:
   5180 	case WM_T_I350:
   5181 	case WM_T_I354:
   5182 	case WM_T_I210:
   5183 	case WM_T_I211:
   5184 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5185 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5186 			CSR_WRITE_FLUSH(sc);
   5187 		delay(5000);
   5188 		break;
   5189 	case WM_T_82542_2_0:
   5190 	case WM_T_82542_2_1:
   5191 	case WM_T_82543:
   5192 	case WM_T_82540:
   5193 	case WM_T_82545:
   5194 	case WM_T_82546:
   5195 	case WM_T_82571:
   5196 	case WM_T_82572:
   5197 	case WM_T_82573:
   5198 	case WM_T_82574:
   5199 	case WM_T_82575:
   5200 	case WM_T_82576:
   5201 	case WM_T_82583:
   5202 	default:
   5203 		/* Everything else can safely use the documented method. */
   5204 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5205 		break;
   5206 	}
   5207 
   5208 	/* Must release the MDIO ownership after MAC reset */
   5209 	switch (sc->sc_type) {
   5210 	case WM_T_82573:
   5211 	case WM_T_82574:
   5212 	case WM_T_82583:
   5213 		if (error == 0)
   5214 			wm_put_hw_semaphore_82573(sc);
   5215 		break;
   5216 	default:
   5217 		break;
   5218 	}
   5219 
   5220 	/* Set Phy Config Counter to 50msec */
   5221 	if (sc->sc_type == WM_T_PCH2) {
   5222 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5223 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5224 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5225 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5226 	}
   5227 
   5228 	if (phy_reset != 0)
   5229 		wm_get_cfg_done(sc);
   5230 
   5231 	/* Reload EEPROM */
   5232 	switch (sc->sc_type) {
   5233 	case WM_T_82542_2_0:
   5234 	case WM_T_82542_2_1:
   5235 	case WM_T_82543:
   5236 	case WM_T_82544:
   5237 		delay(10);
   5238 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5239 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5240 		CSR_WRITE_FLUSH(sc);
   5241 		delay(2000);
   5242 		break;
   5243 	case WM_T_82540:
   5244 	case WM_T_82545:
   5245 	case WM_T_82545_3:
   5246 	case WM_T_82546:
   5247 	case WM_T_82546_3:
   5248 		delay(5*1000);
   5249 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5250 		break;
   5251 	case WM_T_82541:
   5252 	case WM_T_82541_2:
   5253 	case WM_T_82547:
   5254 	case WM_T_82547_2:
   5255 		delay(20000);
   5256 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5257 		break;
   5258 	case WM_T_82571:
   5259 	case WM_T_82572:
   5260 	case WM_T_82573:
   5261 	case WM_T_82574:
   5262 	case WM_T_82583:
   5263 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5264 			delay(10);
   5265 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5266 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5267 			CSR_WRITE_FLUSH(sc);
   5268 		}
   5269 		/* check EECD_EE_AUTORD */
   5270 		wm_get_auto_rd_done(sc);
   5271 		/*
   5272 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5273 		 * is set.
   5274 		 */
   5275 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5276 		    || (sc->sc_type == WM_T_82583))
   5277 			delay(25*1000);
   5278 		break;
   5279 	case WM_T_82575:
   5280 	case WM_T_82576:
   5281 	case WM_T_82580:
   5282 	case WM_T_I350:
   5283 	case WM_T_I354:
   5284 	case WM_T_I210:
   5285 	case WM_T_I211:
   5286 	case WM_T_80003:
   5287 		/* check EECD_EE_AUTORD */
   5288 		wm_get_auto_rd_done(sc);
   5289 		break;
   5290 	case WM_T_ICH8:
   5291 	case WM_T_ICH9:
   5292 	case WM_T_ICH10:
   5293 	case WM_T_PCH:
   5294 	case WM_T_PCH2:
   5295 	case WM_T_PCH_LPT:
   5296 	case WM_T_PCH_SPT:
   5297 	case WM_T_PCH_CNP:
   5298 		break;
   5299 	default:
   5300 		panic("%s: unknown type\n", __func__);
   5301 	}
   5302 
   5303 	/* Check whether EEPROM is present or not */
   5304 	switch (sc->sc_type) {
   5305 	case WM_T_82575:
   5306 	case WM_T_82576:
   5307 	case WM_T_82580:
   5308 	case WM_T_I350:
   5309 	case WM_T_I354:
   5310 	case WM_T_ICH8:
   5311 	case WM_T_ICH9:
   5312 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5313 			/* Not found */
   5314 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5315 			if (sc->sc_type == WM_T_82575)
   5316 				wm_reset_init_script_82575(sc);
   5317 		}
   5318 		break;
   5319 	default:
   5320 		break;
   5321 	}
   5322 
   5323 	if (phy_reset != 0)
   5324 		wm_phy_post_reset(sc);
   5325 
   5326 	if ((sc->sc_type == WM_T_82580)
   5327 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5328 		/* Clear global device reset status bit */
   5329 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5330 	}
   5331 
   5332 	/* Clear any pending interrupt events. */
   5333 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5334 	reg = CSR_READ(sc, WMREG_ICR);
   5335 	if (wm_is_using_msix(sc)) {
   5336 		if (sc->sc_type != WM_T_82574) {
   5337 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5338 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5339 		} else
   5340 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5341 	}
   5342 
   5343 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5344 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5345 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5346 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5347 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5348 		reg |= KABGTXD_BGSQLBIAS;
   5349 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5350 	}
   5351 
   5352 	/* Reload sc_ctrl */
   5353 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5354 
   5355 	wm_set_eee(sc);
   5356 
   5357 	/*
   5358 	 * For PCH, this write will make sure that any noise will be detected
   5359 	 * as a CRC error and be dropped rather than show up as a bad packet
    5360 	 * to the DMA engine.
   5361 	 */
   5362 	if (sc->sc_type == WM_T_PCH)
   5363 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5364 
   5365 	if (sc->sc_type >= WM_T_82544)
   5366 		CSR_WRITE(sc, WMREG_WUC, 0);
   5367 
   5368 	if (sc->sc_type < WM_T_82575)
   5369 		wm_disable_aspm(sc); /* Workaround for some chips */
   5370 
   5371 	wm_reset_mdicnfg_82580(sc);
   5372 
   5373 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5374 		wm_pll_workaround_i210(sc);
   5375 
   5376 	if (sc->sc_type == WM_T_80003) {
   5377 		/* Default to TRUE to enable the MDIC W/A */
   5378 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5379 
   5380 		rv = wm_kmrn_readreg(sc,
   5381 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5382 		if (rv == 0) {
   5383 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5384 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5385 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5386 			else
   5387 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5388 		}
   5389 	}
   5390 }
   5391 
   5392 /*
   5393  * wm_add_rxbuf:
   5394  *
    5395  *	Add a receive buffer to the indicated descriptor.
   5396  */
   5397 static int
   5398 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5399 {
   5400 	struct wm_softc *sc = rxq->rxq_sc;
   5401 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5402 	struct mbuf *m;
   5403 	int error;
   5404 
   5405 	KASSERT(mutex_owned(rxq->rxq_lock));
   5406 
   5407 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5408 	if (m == NULL)
   5409 		return ENOBUFS;
   5410 
   5411 	MCLGET(m, M_DONTWAIT);
   5412 	if ((m->m_flags & M_EXT) == 0) {
   5413 		m_freem(m);
   5414 		return ENOBUFS;
   5415 	}
   5416 
   5417 	if (rxs->rxs_mbuf != NULL)
   5418 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5419 
   5420 	rxs->rxs_mbuf = m;
   5421 
   5422 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5423 	/*
   5424 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5425 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5426 	 */
   5427 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5428 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5429 	if (error) {
   5430 		/* XXX XXX XXX */
   5431 		aprint_error_dev(sc->sc_dev,
   5432 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5433 		panic("wm_add_rxbuf");
   5434 	}
   5435 
   5436 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5437 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5438 
   5439 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5440 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5441 			wm_init_rxdesc(rxq, idx);
   5442 	} else
   5443 		wm_init_rxdesc(rxq, idx);
   5444 
   5445 	return 0;
   5446 }
   5447 
   5448 /*
   5449  * wm_rxdrain:
   5450  *
   5451  *	Drain the receive queue.
   5452  */
   5453 static void
   5454 wm_rxdrain(struct wm_rxqueue *rxq)
   5455 {
   5456 	struct wm_softc *sc = rxq->rxq_sc;
   5457 	struct wm_rxsoft *rxs;
   5458 	int i;
   5459 
   5460 	KASSERT(mutex_owned(rxq->rxq_lock));
   5461 
   5462 	for (i = 0; i < WM_NRXDESC; i++) {
   5463 		rxs = &rxq->rxq_soft[i];
   5464 		if (rxs->rxs_mbuf != NULL) {
   5465 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5466 			m_freem(rxs->rxs_mbuf);
   5467 			rxs->rxs_mbuf = NULL;
   5468 		}
   5469 	}
   5470 }
   5471 
   5472 /*
   5473  * Setup registers for RSS.
   5474  *
    5475  * XXX VMDq is not yet supported
   5476  */
   5477 static void
   5478 wm_init_rss(struct wm_softc *sc)
   5479 {
   5480 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5481 	int i;
   5482 
   5483 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5484 
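         	/*
         	 * Spread the redirection table across the active queues; e.g.
         	 * with sc_nqueues == 4, the qids below cycle 0, 1, 2, 3, 0, ...
         	 */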
   5485 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5486 		unsigned int qid, reta_ent;
   5487 
   5488 		qid  = i % sc->sc_nqueues;
   5489 		switch (sc->sc_type) {
   5490 		case WM_T_82574:
   5491 			reta_ent = __SHIFTIN(qid,
   5492 			    RETA_ENT_QINDEX_MASK_82574);
   5493 			break;
   5494 		case WM_T_82575:
   5495 			reta_ent = __SHIFTIN(qid,
   5496 			    RETA_ENT_QINDEX1_MASK_82575);
   5497 			break;
   5498 		default:
   5499 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5500 			break;
   5501 		}
   5502 
   5503 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5504 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5505 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5506 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5507 	}
   5508 
   5509 	rss_getkey((uint8_t *)rss_key);
   5510 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5511 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5512 
   5513 	if (sc->sc_type == WM_T_82574)
   5514 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5515 	else
   5516 		mrqc = MRQC_ENABLE_RSS_MQ;
   5517 
   5518 	/*
    5519 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5520 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5521 	 */
   5522 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5523 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5524 #if 0
   5525 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5526 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5527 #endif
   5528 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5529 
   5530 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5531 }
   5532 
   5533 /*
    5534  * Adjust the TX and RX queue numbers which the system actually uses.
    5535  *
    5536  * The numbers are affected by the parameters below:
    5537  *     - The number of hardware queues
   5538  *     - The number of MSI-X vectors (= "nvectors" argument)
   5539  *     - ncpu
   5540  */
   5541 static void
   5542 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5543 {
   5544 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5545 
   5546 	if (nvectors < 2) {
   5547 		sc->sc_nqueues = 1;
   5548 		return;
   5549 	}
   5550 
   5551 	switch (sc->sc_type) {
   5552 	case WM_T_82572:
   5553 		hw_ntxqueues = 2;
   5554 		hw_nrxqueues = 2;
   5555 		break;
   5556 	case WM_T_82574:
   5557 		hw_ntxqueues = 2;
   5558 		hw_nrxqueues = 2;
   5559 		break;
   5560 	case WM_T_82575:
   5561 		hw_ntxqueues = 4;
   5562 		hw_nrxqueues = 4;
   5563 		break;
   5564 	case WM_T_82576:
   5565 		hw_ntxqueues = 16;
   5566 		hw_nrxqueues = 16;
   5567 		break;
   5568 	case WM_T_82580:
   5569 	case WM_T_I350:
   5570 	case WM_T_I354:
   5571 		hw_ntxqueues = 8;
   5572 		hw_nrxqueues = 8;
   5573 		break;
   5574 	case WM_T_I210:
   5575 		hw_ntxqueues = 4;
   5576 		hw_nrxqueues = 4;
   5577 		break;
   5578 	case WM_T_I211:
   5579 		hw_ntxqueues = 2;
   5580 		hw_nrxqueues = 2;
   5581 		break;
   5582 		/*
    5583 		 * As the ethernet controllers below do not support MSI-X,
    5584 		 * this driver does not use multiqueue on them.
   5585 		 *     - WM_T_80003
   5586 		 *     - WM_T_ICH8
   5587 		 *     - WM_T_ICH9
   5588 		 *     - WM_T_ICH10
   5589 		 *     - WM_T_PCH
   5590 		 *     - WM_T_PCH2
   5591 		 *     - WM_T_PCH_LPT
   5592 		 */
   5593 	default:
   5594 		hw_ntxqueues = 1;
   5595 		hw_nrxqueues = 1;
   5596 		break;
   5597 	}
   5598 
   5599 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5600 
   5601 	/*
    5602 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5603 	 * the number of queues actually used.
   5604 	 */
   5605 	if (nvectors < hw_nqueues + 1)
   5606 		sc->sc_nqueues = nvectors - 1;
   5607 	else
   5608 		sc->sc_nqueues = hw_nqueues;
   5609 
   5610 	/*
    5611 	 * As more queues than CPUs cannot improve scaling, we limit
    5612 	 * the number of queues actually used.
   5613 	 */
   5614 	if (ncpu < sc->sc_nqueues)
   5615 		sc->sc_nqueues = ncpu;
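         
         	/*
         	 * Worked example (hypothetical numbers): an 82576 has
         	 * hw_nqueues == 16, so nvectors == 5 yields
         	 * sc_nqueues = 5 - 1 = 4, and a 2-CPU machine then clamps
         	 * that to ncpu == 2.
         	 */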
   5616 }
   5617 
   5618 static inline bool
   5619 wm_is_using_msix(struct wm_softc *sc)
   5620 {
   5621 
   5622 	return (sc->sc_nintrs > 1);
   5623 }
   5624 
   5625 static inline bool
   5626 wm_is_using_multiqueue(struct wm_softc *sc)
   5627 {
   5628 
   5629 	return (sc->sc_nqueues > 1);
   5630 }
   5631 
   5632 static int
   5633 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5634 {
   5635 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5636 
   5637 	wmq->wmq_id = qidx;
   5638 	wmq->wmq_intr_idx = intr_idx;
   5639 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5640 	    wm_handle_queue, wmq);
   5641 	if (wmq->wmq_si != NULL)
   5642 		return 0;
   5643 
   5644 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5645 	    wmq->wmq_id);
   5646 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5647 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5648 	return ENOMEM;
   5649 }
   5650 
   5651 /*
    5652  * Both single-interrupt MSI and INTx can use this function.
   5653  */
   5654 static int
   5655 wm_setup_legacy(struct wm_softc *sc)
   5656 {
   5657 	pci_chipset_tag_t pc = sc->sc_pc;
   5658 	const char *intrstr = NULL;
   5659 	char intrbuf[PCI_INTRSTR_LEN];
   5660 	int error;
   5661 
   5662 	error = wm_alloc_txrx_queues(sc);
   5663 	if (error) {
   5664 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5665 		    error);
   5666 		return ENOMEM;
   5667 	}
   5668 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5669 	    sizeof(intrbuf));
   5670 #ifdef WM_MPSAFE
   5671 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5672 #endif
   5673 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5674 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5675 	if (sc->sc_ihs[0] == NULL) {
   5676 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5677 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5678 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5679 		return ENOMEM;
   5680 	}
   5681 
   5682 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5683 	sc->sc_nintrs = 1;
   5684 
   5685 	return wm_softint_establish_queue(sc, 0, 0);
   5686 }
   5687 
   5688 static int
   5689 wm_setup_msix(struct wm_softc *sc)
   5690 {
   5691 	void *vih;
   5692 	kcpuset_t *affinity;
   5693 	int qidx, error, intr_idx, txrx_established;
   5694 	pci_chipset_tag_t pc = sc->sc_pc;
   5695 	const char *intrstr = NULL;
   5696 	char intrbuf[PCI_INTRSTR_LEN];
   5697 	char intr_xname[INTRDEVNAMEBUF];
   5698 
   5699 	if (sc->sc_nqueues < ncpu) {
   5700 		/*
   5701 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5702 		 * interrupts starts from CPU#1.
   5703 		 */
   5704 		sc->sc_affinity_offset = 1;
   5705 	} else {
   5706 		/*
    5707 		 * In this case, this device uses all CPUs. So, we match the
    5708 		 * affinitized cpu_index to the msix vector number for clarity.
   5709 		 */
   5710 		sc->sc_affinity_offset = 0;
   5711 	}
   5712 
   5713 	error = wm_alloc_txrx_queues(sc);
   5714 	if (error) {
   5715 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5716 		    error);
   5717 		return ENOMEM;
   5718 	}
   5719 
   5720 	kcpuset_create(&affinity, false);
   5721 	intr_idx = 0;
   5722 
   5723 	/*
   5724 	 * TX and RX
   5725 	 */
   5726 	txrx_established = 0;
   5727 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5728 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5729 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
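         		/*
         		 * e.g. with sc_affinity_offset == 1, queue 0's vector is
         		 * pinned to CPU#1, queue 1's to CPU#2, and so on,
         		 * round-robin modulo ncpu.
         		 */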
   5730 
   5731 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5732 		    sizeof(intrbuf));
   5733 #ifdef WM_MPSAFE
   5734 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5735 		    PCI_INTR_MPSAFE, true);
   5736 #endif
   5737 		memset(intr_xname, 0, sizeof(intr_xname));
   5738 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5739 		    device_xname(sc->sc_dev), qidx);
   5740 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5741 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5742 		if (vih == NULL) {
   5743 			aprint_error_dev(sc->sc_dev,
   5744 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5745 			    intrstr ? " at " : "",
   5746 			    intrstr ? intrstr : "");
   5747 
   5748 			goto fail;
   5749 		}
   5750 		kcpuset_zero(affinity);
   5751 		/* Round-robin affinity */
   5752 		kcpuset_set(affinity, affinity_to);
   5753 		error = interrupt_distribute(vih, affinity, NULL);
   5754 		if (error == 0) {
   5755 			aprint_normal_dev(sc->sc_dev,
   5756 			    "for TX and RX interrupting at %s affinity to %u\n",
   5757 			    intrstr, affinity_to);
   5758 		} else {
   5759 			aprint_normal_dev(sc->sc_dev,
   5760 			    "for TX and RX interrupting at %s\n", intrstr);
   5761 		}
   5762 		sc->sc_ihs[intr_idx] = vih;
   5763 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5764 			goto fail;
   5765 		txrx_established++;
   5766 		intr_idx++;
   5767 	}
   5768 
   5769 	/* LINK */
   5770 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5771 	    sizeof(intrbuf));
   5772 #ifdef WM_MPSAFE
   5773 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5774 #endif
   5775 	memset(intr_xname, 0, sizeof(intr_xname));
   5776 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5777 	    device_xname(sc->sc_dev));
   5778 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5779 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5780 	if (vih == NULL) {
   5781 		aprint_error_dev(sc->sc_dev,
   5782 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5783 		    intrstr ? " at " : "",
   5784 		    intrstr ? intrstr : "");
   5785 
   5786 		goto fail;
   5787 	}
   5788 	/* Keep default affinity to LINK interrupt */
   5789 	aprint_normal_dev(sc->sc_dev,
   5790 	    "for LINK interrupting at %s\n", intrstr);
   5791 	sc->sc_ihs[intr_idx] = vih;
   5792 	sc->sc_link_intr_idx = intr_idx;
   5793 
   5794 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5795 	kcpuset_destroy(affinity);
   5796 	return 0;
   5797 
   5798  fail:
   5799 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5800 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5801 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5802 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5803 	}
   5804 
   5805 	kcpuset_destroy(affinity);
   5806 	return ENOMEM;
   5807 }
   5808 
   5809 static void
   5810 wm_unset_stopping_flags(struct wm_softc *sc)
   5811 {
   5812 	int i;
   5813 
   5814 	KASSERT(WM_CORE_LOCKED(sc));
   5815 
   5816 	/* Must unset stopping flags in ascending order. */
   5817 	for (i = 0; i < sc->sc_nqueues; i++) {
   5818 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5819 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5820 
   5821 		mutex_enter(txq->txq_lock);
   5822 		txq->txq_stopping = false;
   5823 		mutex_exit(txq->txq_lock);
   5824 
   5825 		mutex_enter(rxq->rxq_lock);
   5826 		rxq->rxq_stopping = false;
   5827 		mutex_exit(rxq->rxq_lock);
   5828 	}
   5829 
   5830 	sc->sc_core_stopping = false;
   5831 }
   5832 
   5833 static void
   5834 wm_set_stopping_flags(struct wm_softc *sc)
   5835 {
   5836 	int i;
   5837 
   5838 	KASSERT(WM_CORE_LOCKED(sc));
   5839 
   5840 	sc->sc_core_stopping = true;
   5841 
   5842 	/* Must set stopping flags in ascending order. */
   5843 	for (i = 0; i < sc->sc_nqueues; i++) {
   5844 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5845 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5846 
   5847 		mutex_enter(rxq->rxq_lock);
   5848 		rxq->rxq_stopping = true;
   5849 		mutex_exit(rxq->rxq_lock);
   5850 
   5851 		mutex_enter(txq->txq_lock);
   5852 		txq->txq_stopping = true;
   5853 		mutex_exit(txq->txq_lock);
   5854 	}
   5855 }
   5856 
   5857 /*
    5858  * Write the interrupt interval value to ITR or EITR.
   5859  */
   5860 static void
   5861 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5862 {
   5863 
   5864 	if (!wmq->wmq_set_itr)
   5865 		return;
   5866 
   5867 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5868 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5869 
   5870 		/*
   5871 		 * 82575 doesn't have CNT_INGR field.
    5872 		 * So, overwrite the counter field in software.
   5873 		 */
   5874 		if (sc->sc_type == WM_T_82575)
   5875 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5876 		else
   5877 			eitr |= EITR_CNT_INGR;
   5878 
   5879 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5880 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5881 		/*
    5882 		 * 82574 has both ITR and EITR. Set EITR when we use
    5883 		 * the multiqueue function with MSI-X.
   5884 		 */
   5885 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5886 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5887 	} else {
   5888 		KASSERT(wmq->wmq_id == 0);
   5889 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5890 	}
   5891 
   5892 	wmq->wmq_set_itr = false;
   5893 }
   5894 
   5895 /*
   5896  * TODO
    5897  * The dynamic itr calculation below is almost the same as linux igb's,
    5898  * however it does not fit wm(4) well. So, AIM stays disabled until we
    5899  * find an appropriate way to calculate itr.
   5900  */
   5901 /*
    5902  * Calculate the interrupt interval value to be written to the register
    5903  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5904  */
   5905 static void
   5906 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5907 {
   5908 #ifdef NOTYET
   5909 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5910 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5911 	uint32_t avg_size = 0;
   5912 	uint32_t new_itr;
   5913 
   5914 	if (rxq->rxq_packets)
   5915 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5916 	if (txq->txq_packets)
   5917 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5918 
   5919 	if (avg_size == 0) {
   5920 		new_itr = 450; /* restore default value */
   5921 		goto out;
   5922 	}
   5923 
   5924 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
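         	/* (4-byte CRC + 8-byte preamble + 12-byte inter-frame gap) */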
   5925 	avg_size += 24;
   5926 
   5927 	/* Don't starve jumbo frames */
   5928 	avg_size = uimin(avg_size, 3000);
   5929 
   5930 	/* Give a little boost to mid-size frames */
   5931 	if ((avg_size > 300) && (avg_size < 1200))
   5932 		new_itr = avg_size / 3;
   5933 	else
   5934 		new_itr = avg_size / 2;
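         
         	/*
         	 * Illustrative numbers: an avg_size of 600 bytes takes the
         	 * mid-size branch above, so new_itr = 600 / 3 = 200; it is then
         	 * scaled by 4 below on everything except the 82575.
         	 */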
   5935 
   5936 out:
   5937 	/*
    5938 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5939 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5940 	 */
   5941 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5942 		new_itr *= 4;
   5943 
   5944 	if (new_itr != wmq->wmq_itr) {
   5945 		wmq->wmq_itr = new_itr;
   5946 		wmq->wmq_set_itr = true;
   5947 	} else
   5948 		wmq->wmq_set_itr = false;
   5949 
   5950 	rxq->rxq_packets = 0;
   5951 	rxq->rxq_bytes = 0;
   5952 	txq->txq_packets = 0;
   5953 	txq->txq_bytes = 0;
   5954 #endif
   5955 }
   5956 
   5957 static void
   5958 wm_init_sysctls(struct wm_softc *sc)
   5959 {
   5960 	struct sysctllog **log;
   5961 	const struct sysctlnode *rnode, *qnode, *cnode;
   5962 	int i, rv;
   5963 	const char *dvname;
   5964 
   5965 	log = &sc->sc_sysctllog;
   5966 	dvname = device_xname(sc->sc_dev);
   5967 
   5968 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5969 	    0, CTLTYPE_NODE, dvname,
   5970 	    SYSCTL_DESCR("wm information and settings"),
   5971 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5972 	if (rv != 0)
   5973 		goto err;
   5974 
   5975 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5976 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5977 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5978 	if (rv != 0)
   5979 		goto teardown;
   5980 
   5981 	for (i = 0; i < sc->sc_nqueues; i++) {
   5982 		struct wm_queue *wmq = &sc->sc_queue[i];
   5983 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5984 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5985 
   5986 		snprintf(sc->sc_queue[i].sysctlname,
   5987 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5988 
   5989 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5990 		    0, CTLTYPE_NODE,
   5991 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5992 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5993 			break;
   5994 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5995 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5996 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5997 		    NULL, 0, &txq->txq_free,
   5998 		    0, CTL_CREATE, CTL_EOL) != 0)
   5999 			break;
   6000 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6001 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6002 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6003 		    NULL, 0, &txq->txq_next,
   6004 		    0, CTL_CREATE, CTL_EOL) != 0)
   6005 			break;
   6006 
   6007 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6008 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6009 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6010 		    NULL, 0, &rxq->rxq_ptr,
   6011 		    0, CTL_CREATE, CTL_EOL) != 0)
   6012 			break;
   6013 	}
   6014 
   6015 #ifdef WM_DEBUG
   6016 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6017 	    CTLTYPE_INT, "debug_flags",
   6018 	    SYSCTL_DESCR(
   6019 		    "Debug flags:\n"	\
   6020 		    "\t0x01 LINK\n"	\
   6021 		    "\t0x02 TX\n"	\
   6022 		    "\t0x04 RX\n"	\
   6023 		    "\t0x08 GMII\n"	\
   6024 		    "\t0x10 MANAGE\n"	\
   6025 		    "\t0x20 NVM\n"	\
   6026 		    "\t0x40 INIT\n"	\
   6027 		    "\t0x80 LOCK"),
   6028 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6029 	if (rv != 0)
   6030 		goto teardown;
   6031 #endif
   6032 
   6033 	return;
   6034 
   6035 teardown:
   6036 	sysctl_teardown(log);
   6037 err:
   6038 	sc->sc_sysctllog = NULL;
   6039 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6040 	    __func__, rv);
   6041 }
   6042 
   6043 /*
   6044  * wm_init:		[ifnet interface function]
   6045  *
   6046  *	Initialize the interface.
   6047  */
   6048 static int
   6049 wm_init(struct ifnet *ifp)
   6050 {
   6051 	struct wm_softc *sc = ifp->if_softc;
   6052 	int ret;
   6053 
   6054 	WM_CORE_LOCK(sc);
   6055 	ret = wm_init_locked(ifp);
   6056 	WM_CORE_UNLOCK(sc);
   6057 
   6058 	return ret;
   6059 }
   6060 
   6061 static int
   6062 wm_init_locked(struct ifnet *ifp)
   6063 {
   6064 	struct wm_softc *sc = ifp->if_softc;
   6065 	struct ethercom *ec = &sc->sc_ethercom;
   6066 	int i, j, trynum, error = 0;
   6067 	uint32_t reg, sfp_mask = 0;
   6068 
   6069 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6070 		device_xname(sc->sc_dev), __func__));
   6071 	KASSERT(WM_CORE_LOCKED(sc));
   6072 
   6073 	/*
    6074 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6075 	 * There is a small but measurable benefit to avoiding the adjustment
   6076 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6077 	 * on such platforms.  One possibility is that the DMA itself is
   6078 	 * slightly more efficient if the front of the entire packet (instead
   6079 	 * of the front of the headers) is aligned.
   6080 	 *
   6081 	 * Note we must always set align_tweak to 0 if we are using
   6082 	 * jumbo frames.
   6083 	 */
   6084 #ifdef __NO_STRICT_ALIGNMENT
   6085 	sc->sc_align_tweak = 0;
   6086 #else
   6087 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6088 		sc->sc_align_tweak = 0;
   6089 	else
   6090 		sc->sc_align_tweak = 2;
   6091 #endif /* __NO_STRICT_ALIGNMENT */
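         	/*
         	 * e.g. for a 1500-byte MTU, 1500 + 14 + 4 = 1518 fits within
         	 * MCLBYTES - 2, so align_tweak is 2 and the IP header following
         	 * the 14-byte Ethernet header lands on a 4-byte boundary.
         	 */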
   6092 
   6093 	/* Cancel any pending I/O. */
   6094 	wm_stop_locked(ifp, false, false);
   6095 
   6096 	/* Update statistics before reset */
   6097 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6098 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6099 
   6100 	/* >= PCH_SPT hardware workaround before reset. */
   6101 	if (sc->sc_type >= WM_T_PCH_SPT)
   6102 		wm_flush_desc_rings(sc);
   6103 
   6104 	/* Reset the chip to a known state. */
   6105 	wm_reset(sc);
   6106 
   6107 	/*
    6108 	 * AMT-based hardware can now take control from firmware.
   6109 	 * Do this after reset.
   6110 	 */
   6111 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6112 		wm_get_hw_control(sc);
   6113 
   6114 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6115 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6116 		wm_legacy_irq_quirk_spt(sc);
   6117 
   6118 	/* Init hardware bits */
   6119 	wm_initialize_hardware_bits(sc);
   6120 
   6121 	/* Reset the PHY. */
   6122 	if (sc->sc_flags & WM_F_HAS_MII)
   6123 		wm_gmii_reset(sc);
   6124 
   6125 	if (sc->sc_type >= WM_T_ICH8) {
   6126 		reg = CSR_READ(sc, WMREG_GCR);
   6127 		/*
   6128 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6129 		 * default after reset.
   6130 		 */
   6131 		if (sc->sc_type == WM_T_ICH8)
   6132 			reg |= GCR_NO_SNOOP_ALL;
   6133 		else
   6134 			reg &= ~GCR_NO_SNOOP_ALL;
   6135 		CSR_WRITE(sc, WMREG_GCR, reg);
   6136 	}
   6137 
   6138 	if ((sc->sc_type >= WM_T_ICH8)
   6139 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6140 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6141 
   6142 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6143 		reg |= CTRL_EXT_RO_DIS;
   6144 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6145 	}
   6146 
   6147 	/* Calculate (E)ITR value */
   6148 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6149 		/*
   6150 		 * For NEWQUEUE's EITR (except for 82575).
    6151 		 * 82575's EITR should be set to the same throttling value
    6152 		 * as other old controllers' ITR because the interrupt/sec
    6153 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6154 		 *
    6155 		 * 82574's EITR should be set to the same value as its ITR.
    6156 		 *
    6157 		 * For N interrupts/sec, set this value to:
    6158 		 * 1,000,000 / N in contrast to the ITR throttling value.
   6159 		 */
   6160 		sc->sc_itr_init = 450;
   6161 	} else if (sc->sc_type >= WM_T_82543) {
   6162 		/*
   6163 		 * Set up the interrupt throttling register (units of 256ns)
   6164 		 * Note that a footnote in Intel's documentation says this
   6165 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6166 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6167 		 * that that is also true for the 1024ns units of the other
   6168 		 * interrupt-related timer registers -- so, really, we ought
   6169 		 * to divide this value by 4 when the link speed is low.
   6170 		 *
   6171 		 * XXX implement this division at link speed change!
   6172 		 */
   6173 
   6174 		/*
   6175 		 * For N interrupts/sec, set this value to:
   6176 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6177 		 * absolute and packet timer values to this value
   6178 		 * divided by 4 to get "simple timer" behavior.
   6179 		 */
   6180 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6181 	}
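	/*
	 * A worked check of the formulas above: sc_itr_init = 450 under
	 * the EITR formula gives about 1,000,000 / 450 ~= 2222
	 * interrupts/sec, and the legacy ITR value of 1500 gives
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, matching
	 * the note beside it.
	 */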
   6182 
   6183 	error = wm_init_txrx_queues(sc);
   6184 	if (error)
   6185 		goto out;
   6186 
   6187 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6188 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6189 	    (sc->sc_type >= WM_T_82575))
   6190 		wm_serdes_power_up_link_82575(sc);
   6191 
   6192 	/* Clear out the VLAN table -- we don't use it (yet). */
   6193 	CSR_WRITE(sc, WMREG_VET, 0);
   6194 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6195 		trynum = 10; /* Due to hw errata */
   6196 	else
   6197 		trynum = 1;
   6198 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6199 		for (j = 0; j < trynum; j++)
   6200 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
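	/*
	 * The repeated writes above presumably work around an I350/I354
	 * erratum in which a single VFTA write may not take effect.
	 */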
   6201 
   6202 	/*
   6203 	 * Set up flow-control parameters.
   6204 	 *
   6205 	 * XXX Values could probably stand some tuning.
   6206 	 */
   6207 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6208 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6209 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6210 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6211 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6212 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6213 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6214 	}
   6215 
   6216 	sc->sc_fcrtl = FCRTL_DFLT;
   6217 	if (sc->sc_type < WM_T_82543) {
   6218 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6219 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6220 	} else {
   6221 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6222 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6223 	}
   6224 
   6225 	if (sc->sc_type == WM_T_80003)
   6226 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6227 	else
   6228 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6229 
   6230 	/* Writes the control register. */
   6231 	wm_set_vlan(sc);
   6232 
   6233 	if (sc->sc_flags & WM_F_HAS_MII) {
   6234 		uint16_t kmreg;
   6235 
   6236 		switch (sc->sc_type) {
   6237 		case WM_T_80003:
   6238 		case WM_T_ICH8:
   6239 		case WM_T_ICH9:
   6240 		case WM_T_ICH10:
   6241 		case WM_T_PCH:
   6242 		case WM_T_PCH2:
   6243 		case WM_T_PCH_LPT:
   6244 		case WM_T_PCH_SPT:
   6245 		case WM_T_PCH_CNP:
   6246 			/*
    6247 			 * Set the MAC to wait the maximum time between each
    6248 			 * iteration and increase the max iterations when
    6249 			 * polling the PHY; this fixes erroneous timeouts at
   6250 			 * 10Mbps.
   6251 			 */
   6252 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6253 			    0xFFFF);
   6254 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6255 			    &kmreg);
   6256 			kmreg |= 0x3F;
   6257 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6258 			    kmreg);
   6259 			break;
   6260 		default:
   6261 			break;
   6262 		}
   6263 
   6264 		if (sc->sc_type == WM_T_80003) {
   6265 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6266 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6267 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6268 
    6269 			/* Bypass RX and TX FIFOs */
   6270 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6271 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6272 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6273 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6274 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6275 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6276 		}
   6277 	}
   6278 #if 0
   6279 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6280 #endif
   6281 
   6282 	/* Set up checksum offload parameters. */
   6283 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6284 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6285 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6286 		reg |= RXCSUM_IPOFL;
   6287 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6288 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6289 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6290 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6291 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6292 
   6293 	/* Set registers about MSI-X */
   6294 	if (wm_is_using_msix(sc)) {
   6295 		uint32_t ivar, qintr_idx;
   6296 		struct wm_queue *wmq;
   6297 		unsigned int qid;
   6298 
   6299 		if (sc->sc_type == WM_T_82575) {
   6300 			/* Interrupt control */
   6301 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6302 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6303 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6304 
   6305 			/* TX and RX */
   6306 			for (i = 0; i < sc->sc_nqueues; i++) {
   6307 				wmq = &sc->sc_queue[i];
   6308 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6309 				    EITR_TX_QUEUE(wmq->wmq_id)
   6310 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6311 			}
   6312 			/* Link status */
   6313 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6314 			    EITR_OTHER);
   6315 		} else if (sc->sc_type == WM_T_82574) {
   6316 			/* Interrupt control */
   6317 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6318 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6319 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6320 
   6321 			/*
    6322 			 * Work around an issue with spurious interrupts in
    6323 			 * MSI-X mode.
    6324 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6325 			 * been initialized yet; re-initialize WMREG_RFCTL here.
   6326 			 */
   6327 			reg = CSR_READ(sc, WMREG_RFCTL);
   6328 			reg |= WMREG_RFCTL_ACKDIS;
   6329 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6330 
   6331 			ivar = 0;
   6332 			/* TX and RX */
   6333 			for (i = 0; i < sc->sc_nqueues; i++) {
   6334 				wmq = &sc->sc_queue[i];
   6335 				qid = wmq->wmq_id;
   6336 				qintr_idx = wmq->wmq_intr_idx;
   6337 
   6338 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6339 				    IVAR_TX_MASK_Q_82574(qid));
   6340 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6341 				    IVAR_RX_MASK_Q_82574(qid));
   6342 			}
   6343 			/* Link status */
   6344 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6345 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6346 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6347 		} else {
   6348 			/* Interrupt control */
   6349 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6350 			    | GPIE_EIAME | GPIE_PBA);
   6351 
   6352 			switch (sc->sc_type) {
   6353 			case WM_T_82580:
   6354 			case WM_T_I350:
   6355 			case WM_T_I354:
   6356 			case WM_T_I210:
   6357 			case WM_T_I211:
   6358 				/* TX and RX */
   6359 				for (i = 0; i < sc->sc_nqueues; i++) {
   6360 					wmq = &sc->sc_queue[i];
   6361 					qid = wmq->wmq_id;
   6362 					qintr_idx = wmq->wmq_intr_idx;
   6363 
   6364 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6365 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6366 					ivar |= __SHIFTIN((qintr_idx
   6367 						| IVAR_VALID),
   6368 					    IVAR_TX_MASK_Q(qid));
   6369 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6370 					ivar |= __SHIFTIN((qintr_idx
   6371 						| IVAR_VALID),
   6372 					    IVAR_RX_MASK_Q(qid));
   6373 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6374 				}
   6375 				break;
   6376 			case WM_T_82576:
   6377 				/* TX and RX */
   6378 				for (i = 0; i < sc->sc_nqueues; i++) {
   6379 					wmq = &sc->sc_queue[i];
   6380 					qid = wmq->wmq_id;
   6381 					qintr_idx = wmq->wmq_intr_idx;
   6382 
   6383 					ivar = CSR_READ(sc,
   6384 					    WMREG_IVAR_Q_82576(qid));
   6385 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6386 					ivar |= __SHIFTIN((qintr_idx
   6387 						| IVAR_VALID),
   6388 					    IVAR_TX_MASK_Q_82576(qid));
   6389 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6390 					ivar |= __SHIFTIN((qintr_idx
   6391 						| IVAR_VALID),
   6392 					    IVAR_RX_MASK_Q_82576(qid));
   6393 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6394 					    ivar);
   6395 				}
   6396 				break;
   6397 			default:
   6398 				break;
   6399 			}
   6400 
   6401 			/* Link status */
   6402 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6403 			    IVAR_MISC_OTHER);
   6404 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6405 		}
   6406 
   6407 		if (wm_is_using_multiqueue(sc)) {
   6408 			wm_init_rss(sc);
   6409 
   6410 			/*
    6411 			 * NOTE: Receive Full-Packet Checksum Offload
    6412 			 * is mutually exclusive with Multiqueue. However,
    6413 			 * this is not the same as TCP/IP checksums, which
    6414 			 * still work.
    6415 			 */
   6416 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6417 			reg |= RXCSUM_PCSD;
   6418 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6419 		}
   6420 	}
   6421 
   6422 	/* Set up the interrupt registers. */
   6423 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6424 
   6425 	/* Enable SFP module insertion interrupt if it's required */
   6426 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6427 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6428 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6429 		sfp_mask = ICR_GPI(0);
   6430 	}
   6431 
   6432 	if (wm_is_using_msix(sc)) {
   6433 		uint32_t mask;
   6434 		struct wm_queue *wmq;
   6435 
   6436 		switch (sc->sc_type) {
   6437 		case WM_T_82574:
   6438 			mask = 0;
   6439 			for (i = 0; i < sc->sc_nqueues; i++) {
   6440 				wmq = &sc->sc_queue[i];
   6441 				mask |= ICR_TXQ(wmq->wmq_id);
   6442 				mask |= ICR_RXQ(wmq->wmq_id);
   6443 			}
   6444 			mask |= ICR_OTHER;
   6445 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6446 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6447 			break;
   6448 		default:
   6449 			if (sc->sc_type == WM_T_82575) {
   6450 				mask = 0;
   6451 				for (i = 0; i < sc->sc_nqueues; i++) {
   6452 					wmq = &sc->sc_queue[i];
   6453 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6454 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6455 				}
   6456 				mask |= EITR_OTHER;
   6457 			} else {
   6458 				mask = 0;
   6459 				for (i = 0; i < sc->sc_nqueues; i++) {
   6460 					wmq = &sc->sc_queue[i];
   6461 					mask |= 1 << wmq->wmq_intr_idx;
   6462 				}
   6463 				mask |= 1 << sc->sc_link_intr_idx;
   6464 			}
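			/*
			 * As we understand these registers: EIAC selects
			 * which extended causes auto-clear when the
			 * interrupt asserts, EIAM which are auto-masked,
			 * and EIMS enables them.
			 */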
   6465 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6466 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6467 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6468 
   6469 			/* For other interrupts */
   6470 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6471 			break;
   6472 		}
   6473 	} else {
   6474 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6475 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6476 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6477 	}
   6478 
   6479 	/* Set up the inter-packet gap. */
   6480 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6481 
   6482 	if (sc->sc_type >= WM_T_82543) {
   6483 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6484 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6485 			wm_itrs_writereg(sc, wmq);
   6486 		}
   6487 		/*
    6488 		 * Link interrupts occur much less often than TX
    6489 		 * and RX interrupts, so we don't tune the
    6490 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6491 		 * FreeBSD's if_igb does.
   6492 		 */
   6493 	}
   6494 
   6495 	/* Set the VLAN ethernetype. */
   6496 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6497 
   6498 	/*
   6499 	 * Set up the transmit control register; we start out with
    6500 	 * a collision distance suitable for FDX, but update it when
   6501 	 * we resolve the media type.
   6502 	 */
   6503 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6504 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6505 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6506 	if (sc->sc_type >= WM_T_82571)
   6507 		sc->sc_tctl |= TCTL_MULR;
   6508 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6509 
   6510 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6511 		/* Write TDT after TCTL.EN is set. See the document. */
   6512 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6513 	}
   6514 
   6515 	if (sc->sc_type == WM_T_80003) {
   6516 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6517 		reg &= ~TCTL_EXT_GCEX_MASK;
   6518 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6519 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6520 	}
   6521 
   6522 	/* Set the media. */
   6523 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6524 		goto out;
   6525 
   6526 	/* Configure for OS presence */
   6527 	wm_init_manageability(sc);
   6528 
   6529 	/*
   6530 	 * Set up the receive control register; we actually program the
   6531 	 * register when we set the receive filter. Use multicast address
   6532 	 * offset type 0.
   6533 	 *
   6534 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6535 	 * don't enable that feature.
   6536 	 */
   6537 	sc->sc_mchash_type = 0;
   6538 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6539 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6540 
    6541 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6542 	if (sc->sc_type == WM_T_82574)
   6543 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6544 
   6545 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6546 		sc->sc_rctl |= RCTL_SECRC;
   6547 
   6548 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6549 	    && (ifp->if_mtu > ETHERMTU)) {
   6550 		sc->sc_rctl |= RCTL_LPE;
   6551 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6552 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6553 	}
   6554 
   6555 	if (MCLBYTES == 2048)
   6556 		sc->sc_rctl |= RCTL_2k;
   6557 	else {
   6558 		if (sc->sc_type >= WM_T_82543) {
   6559 			switch (MCLBYTES) {
   6560 			case 4096:
   6561 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6562 				break;
   6563 			case 8192:
   6564 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6565 				break;
   6566 			case 16384:
   6567 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6568 				break;
   6569 			default:
   6570 				panic("wm_init: MCLBYTES %d unsupported",
   6571 				    MCLBYTES);
   6572 				break;
   6573 			}
   6574 		} else
   6575 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6576 	}
   6577 
   6578 	/* Enable ECC */
   6579 	switch (sc->sc_type) {
   6580 	case WM_T_82571:
   6581 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6582 		reg |= PBA_ECC_CORR_EN;
   6583 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6584 		break;
   6585 	case WM_T_PCH_LPT:
   6586 	case WM_T_PCH_SPT:
   6587 	case WM_T_PCH_CNP:
   6588 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6589 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6590 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6591 
   6592 		sc->sc_ctrl |= CTRL_MEHE;
   6593 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6594 		break;
   6595 	default:
   6596 		break;
   6597 	}
   6598 
   6599 	/*
   6600 	 * Set the receive filter.
   6601 	 *
   6602 	 * For 82575 and 82576, the RX descriptors must be initialized after
    6603 	 * the setting of RCTL.EN in wm_set_filter().
   6604 	 */
   6605 	wm_set_filter(sc);
   6606 
    6607 	/* On 82575 and later, set RDT only if RX is enabled. */
   6608 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6609 		int qidx;
   6610 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6611 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6612 			for (i = 0; i < WM_NRXDESC; i++) {
   6613 				mutex_enter(rxq->rxq_lock);
   6614 				wm_init_rxdesc(rxq, i);
   6615 				mutex_exit(rxq->rxq_lock);
    6617 			}
   6618 		}
   6619 	}
   6620 
   6621 	wm_unset_stopping_flags(sc);
   6622 
   6623 	/* Start the one second link check clock. */
   6624 	callout_schedule(&sc->sc_tick_ch, hz);
   6625 
   6626 	/* ...all done! */
   6627 	ifp->if_flags |= IFF_RUNNING;
   6628 
   6629  out:
   6630 	/* Save last flags for the callback */
   6631 	sc->sc_if_flags = ifp->if_flags;
   6632 	sc->sc_ec_capenable = ec->ec_capenable;
   6633 	if (error)
   6634 		log(LOG_ERR, "%s: interface not running\n",
   6635 		    device_xname(sc->sc_dev));
   6636 	return error;
   6637 }
   6638 
   6639 /*
   6640  * wm_stop:		[ifnet interface function]
   6641  *
   6642  *	Stop transmission on the interface.
   6643  */
   6644 static void
   6645 wm_stop(struct ifnet *ifp, int disable)
   6646 {
   6647 	struct wm_softc *sc = ifp->if_softc;
   6648 
   6649 	ASSERT_SLEEPABLE();
   6650 
   6651 	WM_CORE_LOCK(sc);
    6652 	wm_stop_locked(ifp, disable != 0, true);
   6653 	WM_CORE_UNLOCK(sc);
   6654 
   6655 	/*
    6656 	 * After wm_set_stopping_flags(), it is guaranteed that
    6657 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6658 	 * However, workqueue_wait() cannot be called in
    6659 	 * wm_stop_locked() because it can sleep, so call
    6660 	 * workqueue_wait() here.
   6661 	 */
   6662 	for (int i = 0; i < sc->sc_nqueues; i++)
   6663 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6664 }
   6665 
   6666 static void
   6667 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6668 {
   6669 	struct wm_softc *sc = ifp->if_softc;
   6670 	struct wm_txsoft *txs;
   6671 	int i, qidx;
   6672 
   6673 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6674 		device_xname(sc->sc_dev), __func__));
   6675 	KASSERT(WM_CORE_LOCKED(sc));
   6676 
   6677 	wm_set_stopping_flags(sc);
   6678 
   6679 	if (sc->sc_flags & WM_F_HAS_MII) {
   6680 		/* Down the MII. */
   6681 		mii_down(&sc->sc_mii);
   6682 	} else {
   6683 #if 0
   6684 		/* Should we clear PHY's status properly? */
   6685 		wm_reset(sc);
   6686 #endif
   6687 	}
   6688 
   6689 	/* Stop the transmit and receive processes. */
   6690 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6691 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6692 	sc->sc_rctl &= ~RCTL_EN;
   6693 
   6694 	/*
   6695 	 * Clear the interrupt mask to ensure the device cannot assert its
   6696 	 * interrupt line.
   6697 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6698 	 * service any currently pending or shared interrupt.
   6699 	 */
   6700 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6701 	sc->sc_icr = 0;
   6702 	if (wm_is_using_msix(sc)) {
   6703 		if (sc->sc_type != WM_T_82574) {
   6704 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6705 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6706 		} else
   6707 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6708 	}
   6709 
   6710 	/*
   6711 	 * Stop callouts after interrupts are disabled; if we have
   6712 	 * to wait for them, we will be releasing the CORE_LOCK
   6713 	 * briefly, which will unblock interrupts on the current CPU.
   6714 	 */
   6715 
   6716 	/* Stop the one second clock. */
   6717 	if (wait)
   6718 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6719 	else
   6720 		callout_stop(&sc->sc_tick_ch);
   6721 
   6722 	/* Stop the 82547 Tx FIFO stall check timer. */
   6723 	if (sc->sc_type == WM_T_82547) {
   6724 		if (wait)
   6725 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6726 		else
   6727 			callout_stop(&sc->sc_txfifo_ch);
   6728 	}
   6729 
   6730 	/* Release any queued transmit buffers. */
   6731 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6732 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6733 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6734 		struct mbuf *m;
   6735 
   6736 		mutex_enter(txq->txq_lock);
   6737 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6738 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6739 			txs = &txq->txq_soft[i];
   6740 			if (txs->txs_mbuf != NULL) {
    6741 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6742 				m_freem(txs->txs_mbuf);
   6743 				txs->txs_mbuf = NULL;
   6744 			}
   6745 		}
   6746 		/* Drain txq_interq */
   6747 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6748 			m_freem(m);
   6749 		mutex_exit(txq->txq_lock);
   6750 	}
   6751 
   6752 	/* Mark the interface as down and cancel the watchdog timer. */
   6753 	ifp->if_flags &= ~IFF_RUNNING;
   6754 
   6755 	if (disable) {
   6756 		for (i = 0; i < sc->sc_nqueues; i++) {
   6757 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6758 			mutex_enter(rxq->rxq_lock);
   6759 			wm_rxdrain(rxq);
   6760 			mutex_exit(rxq->rxq_lock);
   6761 		}
   6762 	}
   6763 
   6764 #if 0 /* notyet */
   6765 	if (sc->sc_type >= WM_T_82544)
   6766 		CSR_WRITE(sc, WMREG_WUC, 0);
   6767 #endif
   6768 }
   6769 
   6770 static void
   6771 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6772 {
   6773 	struct mbuf *m;
   6774 	int i;
   6775 
   6776 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6777 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6778 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6779 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6780 		    m->m_data, m->m_len, m->m_flags);
   6781 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6782 	    i, i == 1 ? "" : "s");
   6783 }
   6784 
   6785 /*
   6786  * wm_82547_txfifo_stall:
   6787  *
   6788  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6789  *	reset the FIFO pointers, and restart packet transmission.
   6790  */
   6791 static void
   6792 wm_82547_txfifo_stall(void *arg)
   6793 {
   6794 	struct wm_softc *sc = arg;
   6795 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6796 
   6797 	mutex_enter(txq->txq_lock);
   6798 
   6799 	if (txq->txq_stopping)
   6800 		goto out;
   6801 
   6802 	if (txq->txq_fifo_stall) {
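		/*
		 * The FIFO should have drained once the descriptor ring
		 * head has caught up with the tail and the internal Tx
		 * FIFO head/tail pointers and their saved copies all
		 * match, which is what the comparison below checks.
		 */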
   6803 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6804 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6805 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6806 			/*
   6807 			 * Packets have drained.  Stop transmitter, reset
   6808 			 * FIFO pointers, restart transmitter, and kick
   6809 			 * the packet queue.
   6810 			 */
   6811 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6812 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6813 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6814 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6815 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6816 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6817 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6818 			CSR_WRITE_FLUSH(sc);
   6819 
   6820 			txq->txq_fifo_head = 0;
   6821 			txq->txq_fifo_stall = 0;
   6822 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6823 		} else {
   6824 			/*
   6825 			 * Still waiting for packets to drain; try again in
   6826 			 * another tick.
   6827 			 */
   6828 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6829 		}
   6830 	}
   6831 
   6832 out:
   6833 	mutex_exit(txq->txq_lock);
   6834 }
   6835 
   6836 /*
   6837  * wm_82547_txfifo_bugchk:
   6838  *
    6839  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    6840  *	prevent enqueueing a packet that would wrap around the end
    6841  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6842  *
   6843  *	We do this by checking the amount of space before the end
   6844  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6845  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6846  *	the internal FIFO pointers to the beginning, and restart
   6847  *	transmission on the interface.
   6848  */
   6849 #define	WM_FIFO_HDR		0x10
   6850 #define	WM_82547_PAD_LEN	0x3e0
   6851 static int
   6852 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6853 {
   6854 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6855 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6856 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
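	/*
	 * Each packet occupies its own length plus a 16-byte FIFO header,
	 * rounded up to a WM_FIFO_HDR (16-byte) boundary; e.g. a 60-byte
	 * frame consumes roundup(60 + 16, 16) = 80 bytes of FIFO space.
	 */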
   6857 
   6858 	/* Just return if already stalled. */
   6859 	if (txq->txq_fifo_stall)
   6860 		return 1;
   6861 
   6862 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6863 		/* Stall only occurs in half-duplex mode. */
   6864 		goto send_packet;
   6865 	}
   6866 
   6867 	if (len >= WM_82547_PAD_LEN + space) {
   6868 		txq->txq_fifo_stall = 1;
   6869 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6870 		return 1;
   6871 	}
   6872 
   6873  send_packet:
   6874 	txq->txq_fifo_head += len;
   6875 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6876 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6877 
   6878 	return 0;
   6879 }
   6880 
   6881 static int
   6882 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6883 {
   6884 	int error;
   6885 
   6886 	/*
   6887 	 * Allocate the control data structures, and create and load the
   6888 	 * DMA map for it.
   6889 	 *
   6890 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6891 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6892 	 * both sets within the same 4G segment.
   6893 	 */
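	/*
	 * The (bus_size_t)0x100000000ULL "boundary" argument passed to
	 * bus_dmamem_alloc() below is what enforces this: the allocation
	 * will not cross a 4G boundary.
	 */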
   6894 	if (sc->sc_type < WM_T_82544)
   6895 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6896 	else
   6897 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6898 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6899 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6900 	else
   6901 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6902 
   6903 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6904 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6905 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6906 		aprint_error_dev(sc->sc_dev,
   6907 		    "unable to allocate TX control data, error = %d\n",
   6908 		    error);
   6909 		goto fail_0;
   6910 	}
   6911 
   6912 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6913 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6914 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6915 		aprint_error_dev(sc->sc_dev,
   6916 		    "unable to map TX control data, error = %d\n", error);
   6917 		goto fail_1;
   6918 	}
   6919 
   6920 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6921 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6922 		aprint_error_dev(sc->sc_dev,
   6923 		    "unable to create TX control data DMA map, error = %d\n",
   6924 		    error);
   6925 		goto fail_2;
   6926 	}
   6927 
   6928 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6929 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6930 		aprint_error_dev(sc->sc_dev,
   6931 		    "unable to load TX control data DMA map, error = %d\n",
   6932 		    error);
   6933 		goto fail_3;
   6934 	}
   6935 
   6936 	return 0;
   6937 
   6938  fail_3:
   6939 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6940  fail_2:
   6941 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6942 	    WM_TXDESCS_SIZE(txq));
   6943  fail_1:
   6944 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6945  fail_0:
   6946 	return error;
   6947 }
   6948 
   6949 static void
   6950 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6951 {
   6952 
   6953 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6954 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6955 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6956 	    WM_TXDESCS_SIZE(txq));
   6957 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6958 }
   6959 
   6960 static int
   6961 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6962 {
   6963 	int error;
   6964 	size_t rxq_descs_size;
   6965 
   6966 	/*
   6967 	 * Allocate the control data structures, and create and load the
   6968 	 * DMA map for it.
   6969 	 *
   6970 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6971 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6972 	 * both sets within the same 4G segment.
   6973 	 */
   6974 	rxq->rxq_ndesc = WM_NRXDESC;
   6975 	if (sc->sc_type == WM_T_82574)
   6976 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6977 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6978 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6979 	else
   6980 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6981 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6982 
   6983 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6984 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6985 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6986 		aprint_error_dev(sc->sc_dev,
   6987 		    "unable to allocate RX control data, error = %d\n",
   6988 		    error);
   6989 		goto fail_0;
   6990 	}
   6991 
   6992 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6993 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6994 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6995 		aprint_error_dev(sc->sc_dev,
   6996 		    "unable to map RX control data, error = %d\n", error);
   6997 		goto fail_1;
   6998 	}
   6999 
   7000 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7001 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7002 		aprint_error_dev(sc->sc_dev,
   7003 		    "unable to create RX control data DMA map, error = %d\n",
   7004 		    error);
   7005 		goto fail_2;
   7006 	}
   7007 
   7008 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7009 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7010 		aprint_error_dev(sc->sc_dev,
   7011 		    "unable to load RX control data DMA map, error = %d\n",
   7012 		    error);
   7013 		goto fail_3;
   7014 	}
   7015 
   7016 	return 0;
   7017 
   7018  fail_3:
   7019 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7020  fail_2:
   7021 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7022 	    rxq_descs_size);
   7023  fail_1:
   7024 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7025  fail_0:
   7026 	return error;
   7027 }
   7028 
   7029 static void
   7030 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7031 {
   7032 
   7033 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7034 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7035 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7036 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7037 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7038 }
   7039 
   7040 
   7041 static int
   7042 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7043 {
   7044 	int i, error;
   7045 
   7046 	/* Create the transmit buffer DMA maps. */
   7047 	WM_TXQUEUELEN(txq) =
   7048 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7049 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7050 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7051 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7052 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7053 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7054 			aprint_error_dev(sc->sc_dev,
   7055 			    "unable to create Tx DMA map %d, error = %d\n",
   7056 			    i, error);
   7057 			goto fail;
   7058 		}
   7059 	}
   7060 
   7061 	return 0;
   7062 
   7063  fail:
   7064 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7065 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7066 			bus_dmamap_destroy(sc->sc_dmat,
   7067 			    txq->txq_soft[i].txs_dmamap);
   7068 	}
   7069 	return error;
   7070 }
   7071 
   7072 static void
   7073 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7074 {
   7075 	int i;
   7076 
   7077 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7078 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7079 			bus_dmamap_destroy(sc->sc_dmat,
   7080 			    txq->txq_soft[i].txs_dmamap);
   7081 	}
   7082 }
   7083 
   7084 static int
   7085 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7086 {
   7087 	int i, error;
   7088 
   7089 	/* Create the receive buffer DMA maps. */
   7090 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7091 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7092 			    MCLBYTES, 0, 0,
   7093 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7094 			aprint_error_dev(sc->sc_dev,
   7095 			    "unable to create Rx DMA map %d error = %d\n",
   7096 			    i, error);
   7097 			goto fail;
   7098 		}
   7099 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7100 	}
   7101 
   7102 	return 0;
   7103 
   7104  fail:
   7105 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7106 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7107 			bus_dmamap_destroy(sc->sc_dmat,
   7108 			    rxq->rxq_soft[i].rxs_dmamap);
   7109 	}
   7110 	return error;
   7111 }
   7112 
   7113 static void
   7114 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7115 {
   7116 	int i;
   7117 
   7118 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7119 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7120 			bus_dmamap_destroy(sc->sc_dmat,
   7121 			    rxq->rxq_soft[i].rxs_dmamap);
   7122 	}
   7123 }
   7124 
   7125 /*
    7126  * wm_alloc_txrx_queues:
    7127  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7128  */
   7129 static int
   7130 wm_alloc_txrx_queues(struct wm_softc *sc)
   7131 {
   7132 	int i, error, tx_done, rx_done;
   7133 
   7134 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7135 	    KM_SLEEP);
   7136 	if (sc->sc_queue == NULL) {
    7137 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7138 		error = ENOMEM;
   7139 		goto fail_0;
   7140 	}
   7141 
   7142 	/* For transmission */
   7143 	error = 0;
   7144 	tx_done = 0;
   7145 	for (i = 0; i < sc->sc_nqueues; i++) {
   7146 #ifdef WM_EVENT_COUNTERS
   7147 		int j;
   7148 		const char *xname;
   7149 #endif
   7150 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7151 		txq->txq_sc = sc;
   7152 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7153 
   7154 		error = wm_alloc_tx_descs(sc, txq);
   7155 		if (error)
   7156 			break;
   7157 		error = wm_alloc_tx_buffer(sc, txq);
   7158 		if (error) {
   7159 			wm_free_tx_descs(sc, txq);
   7160 			break;
   7161 		}
   7162 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7163 		if (txq->txq_interq == NULL) {
   7164 			wm_free_tx_descs(sc, txq);
   7165 			wm_free_tx_buffer(sc, txq);
   7166 			error = ENOMEM;
   7167 			break;
   7168 		}
   7169 
   7170 #ifdef WM_EVENT_COUNTERS
   7171 		xname = device_xname(sc->sc_dev);
   7172 
   7173 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7174 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7175 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7176 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7177 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7178 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7179 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7180 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7181 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7182 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7183 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7184 
   7185 		for (j = 0; j < WM_NTXSEGS; j++) {
   7186 			snprintf(txq->txq_txseg_evcnt_names[j],
   7187 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7188 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7189 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7190 		}
   7191 
   7192 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7193 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7194 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7195 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7196 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7197 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7198 #endif /* WM_EVENT_COUNTERS */
   7199 
   7200 		tx_done++;
   7201 	}
   7202 	if (error)
   7203 		goto fail_1;
   7204 
   7205 	/* For receive */
   7206 	error = 0;
   7207 	rx_done = 0;
   7208 	for (i = 0; i < sc->sc_nqueues; i++) {
   7209 #ifdef WM_EVENT_COUNTERS
   7210 		const char *xname;
   7211 #endif
   7212 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7213 		rxq->rxq_sc = sc;
   7214 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7215 
   7216 		error = wm_alloc_rx_descs(sc, rxq);
   7217 		if (error)
   7218 			break;
   7219 
   7220 		error = wm_alloc_rx_buffer(sc, rxq);
   7221 		if (error) {
   7222 			wm_free_rx_descs(sc, rxq);
   7223 			break;
   7224 		}
   7225 
   7226 #ifdef WM_EVENT_COUNTERS
   7227 		xname = device_xname(sc->sc_dev);
   7228 
   7229 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7230 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7231 
   7232 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7233 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7234 #endif /* WM_EVENT_COUNTERS */
   7235 
   7236 		rx_done++;
   7237 	}
   7238 	if (error)
   7239 		goto fail_2;
   7240 
   7241 	return 0;
   7242 
   7243  fail_2:
   7244 	for (i = 0; i < rx_done; i++) {
   7245 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7246 		wm_free_rx_buffer(sc, rxq);
   7247 		wm_free_rx_descs(sc, rxq);
   7248 		if (rxq->rxq_lock)
   7249 			mutex_obj_free(rxq->rxq_lock);
   7250 	}
   7251  fail_1:
   7252 	for (i = 0; i < tx_done; i++) {
   7253 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7254 		pcq_destroy(txq->txq_interq);
   7255 		wm_free_tx_buffer(sc, txq);
   7256 		wm_free_tx_descs(sc, txq);
   7257 		if (txq->txq_lock)
   7258 			mutex_obj_free(txq->txq_lock);
   7259 	}
   7260 
   7261 	kmem_free(sc->sc_queue,
   7262 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7263  fail_0:
   7264 	return error;
   7265 }
   7266 
   7267 /*
    7268  * wm_free_txrx_queues:
    7269  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7270  */
   7271 static void
   7272 wm_free_txrx_queues(struct wm_softc *sc)
   7273 {
   7274 	int i;
   7275 
   7276 	for (i = 0; i < sc->sc_nqueues; i++) {
   7277 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7278 
   7279 #ifdef WM_EVENT_COUNTERS
   7280 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7281 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7282 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7283 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7284 #endif /* WM_EVENT_COUNTERS */
   7285 
   7286 		wm_free_rx_buffer(sc, rxq);
   7287 		wm_free_rx_descs(sc, rxq);
   7288 		if (rxq->rxq_lock)
   7289 			mutex_obj_free(rxq->rxq_lock);
   7290 	}
   7291 
   7292 	for (i = 0; i < sc->sc_nqueues; i++) {
   7293 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7294 		struct mbuf *m;
   7295 #ifdef WM_EVENT_COUNTERS
   7296 		int j;
   7297 
   7298 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7299 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7300 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7301 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7302 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7303 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7304 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7305 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7306 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7307 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7308 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7309 
   7310 		for (j = 0; j < WM_NTXSEGS; j++)
   7311 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7312 
   7313 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7314 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7315 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7316 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7317 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7318 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7319 #endif /* WM_EVENT_COUNTERS */
   7320 
   7321 		/* Drain txq_interq */
   7322 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7323 			m_freem(m);
   7324 		pcq_destroy(txq->txq_interq);
   7325 
   7326 		wm_free_tx_buffer(sc, txq);
   7327 		wm_free_tx_descs(sc, txq);
   7328 		if (txq->txq_lock)
   7329 			mutex_obj_free(txq->txq_lock);
   7330 	}
   7331 
   7332 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7333 }
   7334 
   7335 static void
   7336 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7337 {
   7338 
   7339 	KASSERT(mutex_owned(txq->txq_lock));
   7340 
   7341 	/* Initialize the transmit descriptor ring. */
   7342 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7343 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7344 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7345 	txq->txq_free = WM_NTXDESC(txq);
   7346 	txq->txq_next = 0;
   7347 }
   7348 
   7349 static void
   7350 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7351     struct wm_txqueue *txq)
   7352 {
   7353 
   7354 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7355 		device_xname(sc->sc_dev), __func__));
   7356 	KASSERT(mutex_owned(txq->txq_lock));
   7357 
   7358 	if (sc->sc_type < WM_T_82543) {
   7359 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7360 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7361 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7362 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7363 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7364 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7365 	} else {
   7366 		int qid = wmq->wmq_id;
   7367 
   7368 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7369 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7370 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7371 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7372 
   7373 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7374 			/*
   7375 			 * Don't write TDT before TCTL.EN is set.
   7376 			 * See the document.
   7377 			 */
   7378 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7379 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7380 			    | TXDCTL_WTHRESH(0));
   7381 		else {
   7382 			/* XXX should update with AIM? */
   7383 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7384 			if (sc->sc_type >= WM_T_82540) {
   7385 				/* Should be the same */
   7386 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7387 			}
   7388 
   7389 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7390 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7391 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7392 		}
   7393 	}
   7394 }
   7395 
   7396 static void
   7397 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7398 {
   7399 	int i;
   7400 
   7401 	KASSERT(mutex_owned(txq->txq_lock));
   7402 
   7403 	/* Initialize the transmit job descriptors. */
   7404 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7405 		txq->txq_soft[i].txs_mbuf = NULL;
   7406 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7407 	txq->txq_snext = 0;
   7408 	txq->txq_sdirty = 0;
   7409 }
   7410 
   7411 static void
   7412 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7413     struct wm_txqueue *txq)
   7414 {
   7415 
   7416 	KASSERT(mutex_owned(txq->txq_lock));
   7417 
   7418 	/*
   7419 	 * Set up some register offsets that are different between
   7420 	 * the i82542 and the i82543 and later chips.
   7421 	 */
   7422 	if (sc->sc_type < WM_T_82543)
   7423 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7424 	else
   7425 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7426 
   7427 	wm_init_tx_descs(sc, txq);
   7428 	wm_init_tx_regs(sc, wmq, txq);
   7429 	wm_init_tx_buffer(sc, txq);
   7430 
   7431 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7432 	txq->txq_sending = false;
   7433 }
   7434 
   7435 static void
   7436 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7437     struct wm_rxqueue *rxq)
   7438 {
   7439 
   7440 	KASSERT(mutex_owned(rxq->rxq_lock));
   7441 
   7442 	/*
   7443 	 * Initialize the receive descriptor and receive job
   7444 	 * descriptor rings.
   7445 	 */
   7446 	if (sc->sc_type < WM_T_82543) {
   7447 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7448 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7449 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7450 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7451 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7452 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7453 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7454 
   7455 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7456 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7457 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7458 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7459 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7460 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7461 	} else {
   7462 		int qid = wmq->wmq_id;
   7463 
   7464 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7465 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7466 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7467 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7468 
   7469 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
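			/*
			 * SRRCTL's packet buffer size field is programmed
			 * in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
			 * so MCLBYTES must be a multiple of that
			 * granularity, which the check below enforces.
			 */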
   7470 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7471 				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
   7472 
   7473 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
    7474 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7475 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7476 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7477 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7478 			    | RXDCTL_WTHRESH(1));
   7479 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7480 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7481 		} else {
   7482 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7483 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7484 			/* XXX should update with AIM? */
   7485 			CSR_WRITE(sc, WMREG_RDTR,
   7486 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7487 			/* MUST be the same value as RDTR */
   7488 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7489 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7490 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7491 		}
   7492 	}
   7493 }
   7494 
   7495 static int
   7496 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7497 {
   7498 	struct wm_rxsoft *rxs;
   7499 	int error, i;
   7500 
   7501 	KASSERT(mutex_owned(rxq->rxq_lock));
   7502 
   7503 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7504 		rxs = &rxq->rxq_soft[i];
   7505 		if (rxs->rxs_mbuf == NULL) {
   7506 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7507 				log(LOG_ERR, "%s: unable to allocate or map "
   7508 				    "rx buffer %d, error = %d\n",
   7509 				    device_xname(sc->sc_dev), i, error);
   7510 				/*
   7511 				 * XXX Should attempt to run with fewer receive
   7512 				 * XXX buffers instead of just failing.
   7513 				 */
   7514 				wm_rxdrain(rxq);
   7515 				return ENOMEM;
   7516 			}
   7517 		} else {
   7518 			/*
   7519 			 * For 82575 and 82576, the RX descriptors must be
   7520 			 * initialized after the setting of RCTL.EN in
   7521 			 * wm_set_filter()
   7522 			 */
   7523 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7524 				wm_init_rxdesc(rxq, i);
   7525 		}
   7526 	}
   7527 	rxq->rxq_ptr = 0;
   7528 	rxq->rxq_discard = 0;
   7529 	WM_RXCHAIN_RESET(rxq);
   7530 
   7531 	return 0;
   7532 }
   7533 
   7534 static int
   7535 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7536     struct wm_rxqueue *rxq)
   7537 {
   7538 
   7539 	KASSERT(mutex_owned(rxq->rxq_lock));
   7540 
   7541 	/*
   7542 	 * Set up some register offsets that are different between
   7543 	 * the i82542 and the i82543 and later chips.
   7544 	 */
   7545 	if (sc->sc_type < WM_T_82543)
   7546 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7547 	else
   7548 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7549 
   7550 	wm_init_rx_regs(sc, wmq, rxq);
   7551 	return wm_init_rx_buffer(sc, rxq);
   7552 }
   7553 
   7554 /*
    7555  * wm_init_txrx_queues:
    7556  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7557  */
   7558 static int
   7559 wm_init_txrx_queues(struct wm_softc *sc)
   7560 {
   7561 	int i, error = 0;
   7562 
   7563 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7564 		device_xname(sc->sc_dev), __func__));
   7565 
   7566 	for (i = 0; i < sc->sc_nqueues; i++) {
   7567 		struct wm_queue *wmq = &sc->sc_queue[i];
   7568 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7569 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7570 
   7571 		/*
   7572 		 * TODO
    7573 		 * Currently, a constant value is used instead of AIM.
    7574 		 * Furthermore, the interrupt interval of multiqueue, which
    7575 		 * uses polling mode, is less than the default value.
   7576 		 * More tuning and AIM are required.
   7577 		 */
   7578 		if (wm_is_using_multiqueue(sc))
   7579 			wmq->wmq_itr = 50;
   7580 		else
   7581 			wmq->wmq_itr = sc->sc_itr_init;
   7582 		wmq->wmq_set_itr = true;
   7583 
   7584 		mutex_enter(txq->txq_lock);
   7585 		wm_init_tx_queue(sc, wmq, txq);
   7586 		mutex_exit(txq->txq_lock);
   7587 
   7588 		mutex_enter(rxq->rxq_lock);
   7589 		error = wm_init_rx_queue(sc, wmq, rxq);
   7590 		mutex_exit(rxq->rxq_lock);
   7591 		if (error)
   7592 			break;
   7593 	}
   7594 
   7595 	return error;
   7596 }
   7597 
   7598 /*
   7599  * wm_tx_offload:
   7600  *
   7601  *	Set up TCP/IP checksumming parameters for the
   7602  *	specified packet.
   7603  */
   7604 static void
   7605 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7606     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7607 {
   7608 	struct mbuf *m0 = txs->txs_mbuf;
   7609 	struct livengood_tcpip_ctxdesc *t;
   7610 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7611 	uint32_t ipcse;
   7612 	struct ether_header *eh;
   7613 	int offset, iphl;
   7614 	uint8_t fields;
   7615 
   7616 	/*
   7617 	 * XXX It would be nice if the mbuf pkthdr had offset
   7618 	 * fields for the protocol headers.
   7619 	 */
   7620 
   7621 	eh = mtod(m0, struct ether_header *);
   7622 	switch (htons(eh->ether_type)) {
   7623 	case ETHERTYPE_IP:
   7624 	case ETHERTYPE_IPV6:
   7625 		offset = ETHER_HDR_LEN;
   7626 		break;
   7627 
   7628 	case ETHERTYPE_VLAN:
   7629 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7630 		break;
   7631 
   7632 	default:
   7633 		/* Don't support this protocol or encapsulation. */
   7634 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7635 		txq->txq_last_hw_ipcs = 0;
   7636 		txq->txq_last_hw_tucs = 0;
   7637 		*fieldsp = 0;
   7638 		*cmdp = 0;
   7639 		return;
   7640 	}
   7641 
    7642 	if ((m0->m_pkthdr.csum_flags &
    7643 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0)
    7644 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
    7645 	else
    7646 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7647 
   7648 	ipcse = offset + iphl - 1;
   7649 
   7650 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7651 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7652 	seg = 0;
   7653 	fields = 0;
   7654 
   7655 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7656 		int hlen = offset + iphl;
   7657 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7658 
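		/*
		 * For TSO the hardware is expected to fill in the
		 * per-segment IP total length and finish the TCP checksum,
		 * so below we zero ip_len/ip6_plen and seed th_sum with
		 * the pseudo-header checksum computed without the length.
		 */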
   7659 		if (__predict_false(m0->m_len <
   7660 				    (hlen + sizeof(struct tcphdr)))) {
   7661 			/*
   7662 			 * TCP/IP headers are not in the first mbuf; we need
   7663 			 * to do this the slow and painful way. Let's just
   7664 			 * hope this doesn't happen very often.
   7665 			 */
   7666 			struct tcphdr th;
   7667 
   7668 			WM_Q_EVCNT_INCR(txq, tsopain);
   7669 
   7670 			m_copydata(m0, hlen, sizeof(th), &th);
   7671 			if (v4) {
   7672 				struct ip ip;
   7673 
   7674 				m_copydata(m0, offset, sizeof(ip), &ip);
   7675 				ip.ip_len = 0;
   7676 				m_copyback(m0,
   7677 				    offset + offsetof(struct ip, ip_len),
   7678 				    sizeof(ip.ip_len), &ip.ip_len);
   7679 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7680 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7681 			} else {
   7682 				struct ip6_hdr ip6;
   7683 
   7684 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7685 				ip6.ip6_plen = 0;
   7686 				m_copyback(m0,
   7687 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7688 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7689 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7690 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7691 			}
   7692 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7693 			    sizeof(th.th_sum), &th.th_sum);
   7694 
   7695 			hlen += th.th_off << 2;
   7696 		} else {
   7697 			/*
   7698 			 * TCP/IP headers are in the first mbuf; we can do
   7699 			 * this the easy way.
   7700 			 */
   7701 			struct tcphdr *th;
   7702 
   7703 			if (v4) {
   7704 				struct ip *ip =
   7705 				    (void *)(mtod(m0, char *) + offset);
   7706 				th = (void *)(mtod(m0, char *) + hlen);
   7707 
   7708 				ip->ip_len = 0;
   7709 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7710 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7711 			} else {
   7712 				struct ip6_hdr *ip6 =
   7713 				    (void *)(mtod(m0, char *) + offset);
   7714 				th = (void *)(mtod(m0, char *) + hlen);
   7715 
   7716 				ip6->ip6_plen = 0;
   7717 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7718 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7719 			}
   7720 			hlen += th->th_off << 2;
   7721 		}
   7722 
   7723 		if (v4) {
   7724 			WM_Q_EVCNT_INCR(txq, tso);
   7725 			cmdlen |= WTX_TCPIP_CMD_IP;
   7726 		} else {
   7727 			WM_Q_EVCNT_INCR(txq, tso6);
   7728 			ipcse = 0;
   7729 		}
   7730 		cmd |= WTX_TCPIP_CMD_TSE;
   7731 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7732 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7733 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7734 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7735 	}
   7736 
   7737 	/*
   7738 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7739 	 * offload feature, if we load the context descriptor, we
   7740 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7741 	 */
   7742 
   7743 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7744 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7745 	    WTX_TCPIP_IPCSE(ipcse);
   7746 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7747 		WM_Q_EVCNT_INCR(txq, ipsum);
   7748 		fields |= WTX_IXSM;
   7749 	}
   7750 
   7751 	offset += iphl;
   7752 
   7753 	if (m0->m_pkthdr.csum_flags &
   7754 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7755 		WM_Q_EVCNT_INCR(txq, tusum);
   7756 		fields |= WTX_TXSM;
   7757 		tucs = WTX_TCPIP_TUCSS(offset) |
   7758 		    WTX_TCPIP_TUCSO(offset +
   7759 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7760 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7761 	} else if ((m0->m_pkthdr.csum_flags &
   7762 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7763 		WM_Q_EVCNT_INCR(txq, tusum6);
   7764 		fields |= WTX_TXSM;
   7765 		tucs = WTX_TCPIP_TUCSS(offset) |
   7766 		    WTX_TCPIP_TUCSO(offset +
   7767 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7768 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7769 	} else {
   7770 		/* Just initialize it to a valid TCP context. */
   7771 		tucs = WTX_TCPIP_TUCSS(offset) |
   7772 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7773 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7774 	}
   7775 
   7776 	*cmdp = cmd;
   7777 	*fieldsp = fields;
   7778 
   7779 	/*
    7780 	 * We don't have to write a context descriptor for every packet,
    7781 	 * except on the 82574. For the 82574, we must write a context
    7782 	 * descriptor for every packet when we use two descriptor queues.
    7783 	 *
    7784 	 * The 82574L can only remember the *last* context used,
    7785 	 * regardless of the queue it was used for.  We cannot reuse
   7786 	 * contexts on this hardware platform and must generate a new
   7787 	 * context every time.  82574L hardware spec, section 7.2.6,
   7788 	 * second note.
   7789 	 */
   7790 	if (sc->sc_nqueues < 2) {
    7791 		/*
    7792 		 * Setting up a new checksum offload context for every
    7793 		 * frame takes a lot of processing time in hardware.
    7794 		 * It also hurts performance badly for small frames, so
    7795 		 * avoid it if the driver can reuse a previously
    7796 		 * configured checksum offload context.
    7797 		 * For TSO, in theory we could reuse the same TSO context
    7798 		 * if the frame has the same type (IP/TCP) and the same
    7799 		 * MSS. However, checking whether a frame has the same
    7800 		 * IP/TCP structure is hard, so just ignore that and
    7801 		 * always establish a new TSO context.
    7802 		 */
   7803 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7804 		    == 0) {
   7805 			if (txq->txq_last_hw_cmd == cmd &&
   7806 			    txq->txq_last_hw_fields == fields &&
   7807 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7808 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7809 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7810 				return;
   7811 			}
   7812 		}
   7813 
   7814 		txq->txq_last_hw_cmd = cmd;
   7815 		txq->txq_last_hw_fields = fields;
   7816 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7817 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7818 	}
   7819 
   7820 	/* Fill in the context descriptor. */
   7821 	t = (struct livengood_tcpip_ctxdesc *)
   7822 	    &txq->txq_descs[txq->txq_next];
   7823 	t->tcpip_ipcs = htole32(ipcs);
   7824 	t->tcpip_tucs = htole32(tucs);
   7825 	t->tcpip_cmdlen = htole32(cmdlen);
   7826 	t->tcpip_seg = htole32(seg);
   7827 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7828 
   7829 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7830 	txs->txs_ndesc++;
   7831 }
   7832 
   7833 static inline int
   7834 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7835 {
   7836 	struct wm_softc *sc = ifp->if_softc;
   7837 	u_int cpuid = cpu_index(curcpu());
   7838 
    7839 	/*
    7840 	 * Currently a simple distribution strategy.
    7841 	 * TODO:
    7842 	 * distribute by flowid (the RSS hash value).
    7843 	 */
   7844 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7845 }
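         
         /*
          * An illustrative sketch (added, not part of the driver) of the
          * flowid-based distribution mentioned in the TODO above.
          * wm_mbuf_flowid() is a hypothetical helper that would return the
          * RSS hash if the stack recorded one for this mbuf:
          *
          *	static inline int
          *	wm_select_txqueue_flowid(struct ifnet *ifp, struct mbuf *m)
          *	{
          *		struct wm_softc *sc = ifp->if_softc;
          *
          *		return wm_mbuf_flowid(m) % sc->sc_nqueues;
          *	}
          */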
   7846 
   7847 static inline bool
   7848 wm_linkdown_discard(struct wm_txqueue *txq)
   7849 {
   7850 
   7851 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7852 		return true;
   7853 
   7854 	return false;
   7855 }
   7856 
   7857 /*
   7858  * wm_start:		[ifnet interface function]
   7859  *
   7860  *	Start packet transmission on the interface.
   7861  */
   7862 static void
   7863 wm_start(struct ifnet *ifp)
   7864 {
   7865 	struct wm_softc *sc = ifp->if_softc;
   7866 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7867 
   7868 #ifdef WM_MPSAFE
   7869 	KASSERT(if_is_mpsafe(ifp));
   7870 #endif
   7871 	/*
   7872 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7873 	 */
   7874 
   7875 	mutex_enter(txq->txq_lock);
   7876 	if (!txq->txq_stopping)
   7877 		wm_start_locked(ifp);
   7878 	mutex_exit(txq->txq_lock);
   7879 }
   7880 
   7881 static void
   7882 wm_start_locked(struct ifnet *ifp)
   7883 {
   7884 	struct wm_softc *sc = ifp->if_softc;
   7885 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7886 
   7887 	wm_send_common_locked(ifp, txq, false);
   7888 }
   7889 
   7890 static int
   7891 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7892 {
   7893 	int qid;
   7894 	struct wm_softc *sc = ifp->if_softc;
   7895 	struct wm_txqueue *txq;
   7896 
   7897 	qid = wm_select_txqueue(ifp, m);
   7898 	txq = &sc->sc_queue[qid].wmq_txq;
   7899 
   7900 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7901 		m_freem(m);
   7902 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7903 		return ENOBUFS;
   7904 	}
   7905 
   7906 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7907 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7908 	if (m->m_flags & M_MCAST)
   7909 		if_statinc_ref(nsr, if_omcasts);
   7910 	IF_STAT_PUTREF(ifp);
   7911 
   7912 	if (mutex_tryenter(txq->txq_lock)) {
   7913 		if (!txq->txq_stopping)
   7914 			wm_transmit_locked(ifp, txq);
   7915 		mutex_exit(txq->txq_lock);
   7916 	}
   7917 
   7918 	return 0;
   7919 }
   7920 
   7921 static void
   7922 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7923 {
   7924 
   7925 	wm_send_common_locked(ifp, txq, true);
   7926 }
   7927 
   7928 static void
   7929 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7930     bool is_transmit)
   7931 {
   7932 	struct wm_softc *sc = ifp->if_softc;
   7933 	struct mbuf *m0;
   7934 	struct wm_txsoft *txs;
   7935 	bus_dmamap_t dmamap;
   7936 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7937 	bus_addr_t curaddr;
   7938 	bus_size_t seglen, curlen;
   7939 	uint32_t cksumcmd;
   7940 	uint8_t cksumfields;
   7941 	bool remap = true;
   7942 
   7943 	KASSERT(mutex_owned(txq->txq_lock));
   7944 
   7945 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7946 		return;
   7947 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7948 		return;
   7949 
   7950 	if (__predict_false(wm_linkdown_discard(txq))) {
   7951 		do {
   7952 			if (is_transmit)
   7953 				m0 = pcq_get(txq->txq_interq);
   7954 			else
   7955 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    7956 			/*
    7957 			 * Increment the sent-packet counter, just as when
    7958 			 * the packet is discarded by a link-down PHY.
    7959 			 */
   7960 			if (m0 != NULL)
   7961 				if_statinc(ifp, if_opackets);
   7962 			m_freem(m0);
   7963 		} while (m0 != NULL);
   7964 		return;
   7965 	}
   7966 
   7967 	/* Remember the previous number of free descriptors. */
   7968 	ofree = txq->txq_free;
   7969 
   7970 	/*
   7971 	 * Loop through the send queue, setting up transmit descriptors
   7972 	 * until we drain the queue, or use up all available transmit
   7973 	 * descriptors.
   7974 	 */
   7975 	for (;;) {
   7976 		m0 = NULL;
   7977 
   7978 		/* Get a work queue entry. */
   7979 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7980 			wm_txeof(txq, UINT_MAX);
   7981 			if (txq->txq_sfree == 0) {
   7982 				DPRINTF(sc, WM_DEBUG_TX,
   7983 				    ("%s: TX: no free job descriptors\n",
   7984 					device_xname(sc->sc_dev)));
   7985 				WM_Q_EVCNT_INCR(txq, txsstall);
   7986 				break;
   7987 			}
   7988 		}
   7989 
   7990 		/* Grab a packet off the queue. */
   7991 		if (is_transmit)
   7992 			m0 = pcq_get(txq->txq_interq);
   7993 		else
   7994 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7995 		if (m0 == NULL)
   7996 			break;
   7997 
   7998 		DPRINTF(sc, WM_DEBUG_TX,
   7999 		    ("%s: TX: have packet to transmit: %p\n",
   8000 			device_xname(sc->sc_dev), m0));
   8001 
   8002 		txs = &txq->txq_soft[txq->txq_snext];
   8003 		dmamap = txs->txs_dmamap;
   8004 
   8005 		use_tso = (m0->m_pkthdr.csum_flags &
   8006 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8007 
   8008 		/*
   8009 		 * So says the Linux driver:
   8010 		 * The controller does a simple calculation to make sure
   8011 		 * there is enough room in the FIFO before initiating the
   8012 		 * DMA for each buffer. The calc is:
   8013 		 *	4 = ceil(buffer len / MSS)
   8014 		 * To make sure we don't overrun the FIFO, adjust the max
   8015 		 * buffer len if the MSS drops.
   8016 		 */
   8017 		dmamap->dm_maxsegsz =
   8018 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8019 		    ? m0->m_pkthdr.segsz << 2
   8020 		    : WTX_MAX_LEN;
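         		/*
         		 * Worked example (added): with an MSS of 536 bytes,
         		 * segsz << 2 = 2144, so each DMA segment is capped at
         		 * 2144 bytes and ceil(2144 / 536) = 4, matching the
         		 * FIFO check described above; with a larger MSS the
         		 * WTX_MAX_LEN cap applies instead.
         		 */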
   8021 
   8022 		/*
   8023 		 * Load the DMA map.  If this fails, the packet either
   8024 		 * didn't fit in the allotted number of segments, or we
   8025 		 * were short on resources.  For the too-many-segments
   8026 		 * case, we simply report an error and drop the packet,
   8027 		 * since we can't sanely copy a jumbo packet to a single
   8028 		 * buffer.
   8029 		 */
   8030 retry:
   8031 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8032 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8033 		if (__predict_false(error)) {
   8034 			if (error == EFBIG) {
   8035 				if (remap == true) {
   8036 					struct mbuf *m;
   8037 
   8038 					remap = false;
   8039 					m = m_defrag(m0, M_NOWAIT);
   8040 					if (m != NULL) {
   8041 						WM_Q_EVCNT_INCR(txq, defrag);
   8042 						m0 = m;
   8043 						goto retry;
   8044 					}
   8045 				}
   8046 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8047 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8048 				    "DMA segments, dropping...\n",
   8049 				    device_xname(sc->sc_dev));
   8050 				wm_dump_mbuf_chain(sc, m0);
   8051 				m_freem(m0);
   8052 				continue;
   8053 			}
   8054 			/* Short on resources, just stop for now. */
   8055 			DPRINTF(sc, WM_DEBUG_TX,
   8056 			    ("%s: TX: dmamap load failed: %d\n",
   8057 				device_xname(sc->sc_dev), error));
   8058 			break;
   8059 		}
   8060 
   8061 		segs_needed = dmamap->dm_nsegs;
   8062 		if (use_tso) {
   8063 			/* For sentinel descriptor; see below. */
   8064 			segs_needed++;
   8065 		}
   8066 
   8067 		/*
   8068 		 * Ensure we have enough descriptors free to describe
   8069 		 * the packet. Note, we always reserve one descriptor
   8070 		 * at the end of the ring due to the semantics of the
   8071 		 * TDT register, plus one more in the event we need
   8072 		 * to load offload context.
   8073 		 */
   8074 		if (segs_needed > txq->txq_free - 2) {
   8075 			/*
   8076 			 * Not enough free descriptors to transmit this
   8077 			 * packet.  We haven't committed anything yet,
   8078 			 * so just unload the DMA map, put the packet
    8079 			 * back on the queue, and punt. Notify the upper
   8080 			 * layer that there are no more slots left.
   8081 			 */
   8082 			DPRINTF(sc, WM_DEBUG_TX,
   8083 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8084 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8085 				segs_needed, txq->txq_free - 1));
   8086 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8087 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8088 			WM_Q_EVCNT_INCR(txq, txdstall);
   8089 			break;
   8090 		}
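         		/*
         		 * Example (added): with txq_free == 6, a packet
         		 * needing segs_needed == 5 stalls here (5 > 6 - 2),
         		 * since one slot is kept unused for the TDT semantics
         		 * and one is reserved for a context descriptor.
         		 */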
   8091 
   8092 		/*
   8093 		 * Check for 82547 Tx FIFO bug. We need to do this
   8094 		 * once we know we can transmit the packet, since we
   8095 		 * do some internal FIFO space accounting here.
   8096 		 */
   8097 		if (sc->sc_type == WM_T_82547 &&
   8098 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8099 			DPRINTF(sc, WM_DEBUG_TX,
   8100 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8101 				device_xname(sc->sc_dev)));
   8102 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8103 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8104 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8105 			break;
   8106 		}
   8107 
   8108 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8109 
   8110 		DPRINTF(sc, WM_DEBUG_TX,
   8111 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8112 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8113 
   8114 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8115 
   8116 		/*
   8117 		 * Store a pointer to the packet so that we can free it
   8118 		 * later.
   8119 		 *
    8120 		 * Initially, we consider the number of descriptors the
    8121 		 * packet uses to be the number of DMA segments.  This
    8122 		 * may be incremented by 1 if we do checksum offload (a
    8123 		 * descriptor is used to set the checksum context).
   8124 		 */
   8125 		txs->txs_mbuf = m0;
   8126 		txs->txs_firstdesc = txq->txq_next;
   8127 		txs->txs_ndesc = segs_needed;
   8128 
   8129 		/* Set up offload parameters for this packet. */
   8130 		if (m0->m_pkthdr.csum_flags &
   8131 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8132 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8133 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8134 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8135 		} else {
   8136 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8137 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8138 			cksumcmd = 0;
   8139 			cksumfields = 0;
   8140 		}
   8141 
   8142 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8143 
   8144 		/* Sync the DMA map. */
   8145 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8146 		    BUS_DMASYNC_PREWRITE);
   8147 
   8148 		/* Initialize the transmit descriptor. */
   8149 		for (nexttx = txq->txq_next, seg = 0;
   8150 		     seg < dmamap->dm_nsegs; seg++) {
   8151 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8152 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8153 			     seglen != 0;
   8154 			     curaddr += curlen, seglen -= curlen,
   8155 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8156 				curlen = seglen;
   8157 
   8158 				/*
   8159 				 * So says the Linux driver:
   8160 				 * Work around for premature descriptor
   8161 				 * write-backs in TSO mode.  Append a
   8162 				 * 4-byte sentinel descriptor.
   8163 				 */
   8164 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8165 				    curlen > 8)
   8166 					curlen -= 4;
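         				/*
         				 * Note (added): trimming 4 bytes here
         				 * leaves 4 bytes in seglen, so the loop
         				 * makes one extra pass and emits the
         				 * 4-byte sentinel descriptor that
         				 * segs_needed reserved above.
         				 */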
   8167 
   8168 				wm_set_dma_addr(
   8169 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8170 				txq->txq_descs[nexttx].wtx_cmdlen
   8171 				    = htole32(cksumcmd | curlen);
   8172 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8173 				    = 0;
   8174 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8175 				    = cksumfields;
    8176 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8177 				lasttx = nexttx;
   8178 
   8179 				DPRINTF(sc, WM_DEBUG_TX,
   8180 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8181 					"len %#04zx\n",
   8182 					device_xname(sc->sc_dev), nexttx,
   8183 					(uint64_t)curaddr, curlen));
   8184 			}
   8185 		}
   8186 
   8187 		KASSERT(lasttx != -1);
   8188 
   8189 		/*
   8190 		 * Set up the command byte on the last descriptor of
   8191 		 * the packet. If we're in the interrupt delay window,
   8192 		 * delay the interrupt.
   8193 		 */
   8194 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8195 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8196 
   8197 		/*
   8198 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8199 		 * up the descriptor to encapsulate the packet for us.
   8200 		 *
   8201 		 * This is only valid on the last descriptor of the packet.
   8202 		 */
   8203 		if (vlan_has_tag(m0)) {
   8204 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8205 			    htole32(WTX_CMD_VLE);
   8206 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8207 			    = htole16(vlan_get_tag(m0));
   8208 		}
   8209 
   8210 		txs->txs_lastdesc = lasttx;
   8211 
   8212 		DPRINTF(sc, WM_DEBUG_TX,
   8213 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8214 			device_xname(sc->sc_dev),
   8215 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8216 
   8217 		/* Sync the descriptors we're using. */
   8218 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8219 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8220 
   8221 		/* Give the packet to the chip. */
   8222 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8223 
   8224 		DPRINTF(sc, WM_DEBUG_TX,
   8225 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8226 
   8227 		DPRINTF(sc, WM_DEBUG_TX,
   8228 		    ("%s: TX: finished transmitting packet, job %d\n",
   8229 			device_xname(sc->sc_dev), txq->txq_snext));
   8230 
   8231 		/* Advance the tx pointer. */
   8232 		txq->txq_free -= txs->txs_ndesc;
   8233 		txq->txq_next = nexttx;
   8234 
   8235 		txq->txq_sfree--;
   8236 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8237 
   8238 		/* Pass the packet to any BPF listeners. */
   8239 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8240 	}
   8241 
   8242 	if (m0 != NULL) {
   8243 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8244 		WM_Q_EVCNT_INCR(txq, descdrop);
   8245 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8246 			__func__));
   8247 		m_freem(m0);
   8248 	}
   8249 
   8250 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8251 		/* No more slots; notify upper layer. */
   8252 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8253 	}
   8254 
   8255 	if (txq->txq_free != ofree) {
   8256 		/* Set a watchdog timer in case the chip flakes out. */
   8257 		txq->txq_lastsent = time_uptime;
   8258 		txq->txq_sending = true;
   8259 	}
   8260 }
   8261 
   8262 /*
   8263  * wm_nq_tx_offload:
   8264  *
   8265  *	Set up TCP/IP checksumming parameters for the
   8266  *	specified packet, for NEWQUEUE devices
   8267  */
   8268 static void
   8269 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8270     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8271 {
   8272 	struct mbuf *m0 = txs->txs_mbuf;
   8273 	uint32_t vl_len, mssidx, cmdc;
   8274 	struct ether_header *eh;
   8275 	int offset, iphl;
   8276 
   8277 	/*
   8278 	 * XXX It would be nice if the mbuf pkthdr had offset
   8279 	 * fields for the protocol headers.
   8280 	 */
   8281 	*cmdlenp = 0;
   8282 	*fieldsp = 0;
   8283 
   8284 	eh = mtod(m0, struct ether_header *);
    8285 	switch (ntohs(eh->ether_type)) {
   8286 	case ETHERTYPE_IP:
   8287 	case ETHERTYPE_IPV6:
   8288 		offset = ETHER_HDR_LEN;
   8289 		break;
   8290 
   8291 	case ETHERTYPE_VLAN:
   8292 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8293 		break;
   8294 
   8295 	default:
   8296 		/* Don't support this protocol or encapsulation. */
   8297 		*do_csum = false;
   8298 		return;
   8299 	}
   8300 	*do_csum = true;
   8301 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8302 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8303 
   8304 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8305 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8306 
   8307 	if ((m0->m_pkthdr.csum_flags &
   8308 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8309 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8310 	} else {
   8311 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8312 	}
   8313 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8314 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8315 
   8316 	if (vlan_has_tag(m0)) {
   8317 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8318 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8319 		*cmdlenp |= NQTX_CMD_VLE;
   8320 	}
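         	/*
         	 * Worked example (added): for an untagged IPv4/TCP frame,
         	 * offset = ETHER_HDR_LEN (14) and iphl = 20, so vl_len ends
         	 * up as (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
         	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT) with no VLAN bits set.
         	 */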
   8321 
   8322 	mssidx = 0;
   8323 
   8324 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8325 		int hlen = offset + iphl;
   8326 		int tcp_hlen;
   8327 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8328 
   8329 		if (__predict_false(m0->m_len <
   8330 				    (hlen + sizeof(struct tcphdr)))) {
   8331 			/*
   8332 			 * TCP/IP headers are not in the first mbuf; we need
   8333 			 * to do this the slow and painful way. Let's just
   8334 			 * hope this doesn't happen very often.
   8335 			 */
   8336 			struct tcphdr th;
   8337 
   8338 			WM_Q_EVCNT_INCR(txq, tsopain);
   8339 
   8340 			m_copydata(m0, hlen, sizeof(th), &th);
   8341 			if (v4) {
   8342 				struct ip ip;
   8343 
   8344 				m_copydata(m0, offset, sizeof(ip), &ip);
   8345 				ip.ip_len = 0;
   8346 				m_copyback(m0,
   8347 				    offset + offsetof(struct ip, ip_len),
   8348 				    sizeof(ip.ip_len), &ip.ip_len);
   8349 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8350 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8351 			} else {
   8352 				struct ip6_hdr ip6;
   8353 
   8354 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8355 				ip6.ip6_plen = 0;
   8356 				m_copyback(m0,
   8357 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8358 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8359 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8360 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8361 			}
   8362 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8363 			    sizeof(th.th_sum), &th.th_sum);
   8364 
   8365 			tcp_hlen = th.th_off << 2;
   8366 		} else {
   8367 			/*
   8368 			 * TCP/IP headers are in the first mbuf; we can do
   8369 			 * this the easy way.
   8370 			 */
   8371 			struct tcphdr *th;
   8372 
   8373 			if (v4) {
   8374 				struct ip *ip =
   8375 				    (void *)(mtod(m0, char *) + offset);
   8376 				th = (void *)(mtod(m0, char *) + hlen);
   8377 
   8378 				ip->ip_len = 0;
   8379 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8380 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8381 			} else {
   8382 				struct ip6_hdr *ip6 =
   8383 				    (void *)(mtod(m0, char *) + offset);
   8384 				th = (void *)(mtod(m0, char *) + hlen);
   8385 
   8386 				ip6->ip6_plen = 0;
   8387 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8388 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8389 			}
   8390 			tcp_hlen = th->th_off << 2;
   8391 		}
   8392 		hlen += tcp_hlen;
   8393 		*cmdlenp |= NQTX_CMD_TSE;
   8394 
   8395 		if (v4) {
   8396 			WM_Q_EVCNT_INCR(txq, tso);
   8397 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8398 		} else {
   8399 			WM_Q_EVCNT_INCR(txq, tso6);
   8400 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8401 		}
   8402 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8403 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8404 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8405 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8406 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8407 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8408 	} else {
   8409 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8410 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8411 	}
   8412 
   8413 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8414 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8415 		cmdc |= NQTXC_CMD_IP4;
   8416 	}
   8417 
   8418 	if (m0->m_pkthdr.csum_flags &
   8419 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8420 		WM_Q_EVCNT_INCR(txq, tusum);
   8421 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8422 			cmdc |= NQTXC_CMD_TCP;
   8423 		else
   8424 			cmdc |= NQTXC_CMD_UDP;
   8425 
   8426 		cmdc |= NQTXC_CMD_IP4;
   8427 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8428 	}
   8429 	if (m0->m_pkthdr.csum_flags &
   8430 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8431 		WM_Q_EVCNT_INCR(txq, tusum6);
   8432 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8433 			cmdc |= NQTXC_CMD_TCP;
   8434 		else
   8435 			cmdc |= NQTXC_CMD_UDP;
   8436 
   8437 		cmdc |= NQTXC_CMD_IP6;
   8438 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8439 	}
   8440 
    8441 	/*
    8442 	 * We don't have to write a context descriptor for every packet on
    8443 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8444 	 * I354, I210 and I211. Writing one per Tx queue is enough for
    8445 	 * these controllers.
    8446 	 * Writing a context descriptor for every packet adds overhead,
    8447 	 * but it does not cause problems.
    8448 	 */
   8449 	/* Fill in the context descriptor. */
   8450 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8451 	    htole32(vl_len);
   8452 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8453 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8454 	    htole32(cmdc);
   8455 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8456 	    htole32(mssidx);
   8457 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8458 	DPRINTF(sc, WM_DEBUG_TX,
   8459 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8460 		txq->txq_next, 0, vl_len));
   8461 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8462 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8463 	txs->txs_ndesc++;
   8464 }
   8465 
   8466 /*
   8467  * wm_nq_start:		[ifnet interface function]
   8468  *
   8469  *	Start packet transmission on the interface for NEWQUEUE devices
   8470  */
   8471 static void
   8472 wm_nq_start(struct ifnet *ifp)
   8473 {
   8474 	struct wm_softc *sc = ifp->if_softc;
   8475 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8476 
   8477 #ifdef WM_MPSAFE
   8478 	KASSERT(if_is_mpsafe(ifp));
   8479 #endif
   8480 	/*
   8481 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8482 	 */
   8483 
   8484 	mutex_enter(txq->txq_lock);
   8485 	if (!txq->txq_stopping)
   8486 		wm_nq_start_locked(ifp);
   8487 	mutex_exit(txq->txq_lock);
   8488 }
   8489 
   8490 static void
   8491 wm_nq_start_locked(struct ifnet *ifp)
   8492 {
   8493 	struct wm_softc *sc = ifp->if_softc;
   8494 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8495 
   8496 	wm_nq_send_common_locked(ifp, txq, false);
   8497 }
   8498 
   8499 static int
   8500 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8501 {
   8502 	int qid;
   8503 	struct wm_softc *sc = ifp->if_softc;
   8504 	struct wm_txqueue *txq;
   8505 
   8506 	qid = wm_select_txqueue(ifp, m);
   8507 	txq = &sc->sc_queue[qid].wmq_txq;
   8508 
   8509 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8510 		m_freem(m);
   8511 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8512 		return ENOBUFS;
   8513 	}
   8514 
   8515 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8516 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8517 	if (m->m_flags & M_MCAST)
   8518 		if_statinc_ref(nsr, if_omcasts);
   8519 	IF_STAT_PUTREF(ifp);
   8520 
    8521 	/*
    8522 	 * There are two situations in which this mutex_tryenter() can
    8523 	 * fail at run time:
    8524 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8525 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8526 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8527 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8528 	 * In case (2), the last packet enqueued to txq->txq_interq is
    8529 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    8530 	 * get stuck either.
    8531 	 */
   8532 	if (mutex_tryenter(txq->txq_lock)) {
   8533 		if (!txq->txq_stopping)
   8534 			wm_nq_transmit_locked(ifp, txq);
   8535 		mutex_exit(txq->txq_lock);
   8536 	}
   8537 
   8538 	return 0;
   8539 }
   8540 
   8541 static void
   8542 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8543 {
   8544 
   8545 	wm_nq_send_common_locked(ifp, txq, true);
   8546 }
   8547 
   8548 static void
   8549 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8550     bool is_transmit)
   8551 {
   8552 	struct wm_softc *sc = ifp->if_softc;
   8553 	struct mbuf *m0;
   8554 	struct wm_txsoft *txs;
   8555 	bus_dmamap_t dmamap;
   8556 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8557 	bool do_csum, sent;
   8558 	bool remap = true;
   8559 
   8560 	KASSERT(mutex_owned(txq->txq_lock));
   8561 
   8562 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8563 		return;
   8564 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8565 		return;
   8566 
   8567 	if (__predict_false(wm_linkdown_discard(txq))) {
   8568 		do {
   8569 			if (is_transmit)
   8570 				m0 = pcq_get(txq->txq_interq);
   8571 			else
   8572 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    8573 			/*
    8574 			 * Increment the sent-packet counter, just as when
    8575 			 * the packet is discarded by a link-down PHY.
    8576 			 */
   8577 			if (m0 != NULL)
   8578 				if_statinc(ifp, if_opackets);
   8579 			m_freem(m0);
   8580 		} while (m0 != NULL);
   8581 		return;
   8582 	}
   8583 
   8584 	sent = false;
   8585 
   8586 	/*
   8587 	 * Loop through the send queue, setting up transmit descriptors
   8588 	 * until we drain the queue, or use up all available transmit
   8589 	 * descriptors.
   8590 	 */
   8591 	for (;;) {
   8592 		m0 = NULL;
   8593 
   8594 		/* Get a work queue entry. */
   8595 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8596 			wm_txeof(txq, UINT_MAX);
   8597 			if (txq->txq_sfree == 0) {
   8598 				DPRINTF(sc, WM_DEBUG_TX,
   8599 				    ("%s: TX: no free job descriptors\n",
   8600 					device_xname(sc->sc_dev)));
   8601 				WM_Q_EVCNT_INCR(txq, txsstall);
   8602 				break;
   8603 			}
   8604 		}
   8605 
   8606 		/* Grab a packet off the queue. */
   8607 		if (is_transmit)
   8608 			m0 = pcq_get(txq->txq_interq);
   8609 		else
   8610 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8611 		if (m0 == NULL)
   8612 			break;
   8613 
   8614 		DPRINTF(sc, WM_DEBUG_TX,
   8615 		    ("%s: TX: have packet to transmit: %p\n",
   8616 		    device_xname(sc->sc_dev), m0));
   8617 
   8618 		txs = &txq->txq_soft[txq->txq_snext];
   8619 		dmamap = txs->txs_dmamap;
   8620 
   8621 		/*
   8622 		 * Load the DMA map.  If this fails, the packet either
   8623 		 * didn't fit in the allotted number of segments, or we
   8624 		 * were short on resources.  For the too-many-segments
   8625 		 * case, we simply report an error and drop the packet,
   8626 		 * since we can't sanely copy a jumbo packet to a single
   8627 		 * buffer.
   8628 		 */
   8629 retry:
   8630 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8631 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8632 		if (__predict_false(error)) {
   8633 			if (error == EFBIG) {
   8634 				if (remap == true) {
   8635 					struct mbuf *m;
   8636 
   8637 					remap = false;
   8638 					m = m_defrag(m0, M_NOWAIT);
   8639 					if (m != NULL) {
   8640 						WM_Q_EVCNT_INCR(txq, defrag);
   8641 						m0 = m;
   8642 						goto retry;
   8643 					}
   8644 				}
   8645 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8646 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8647 				    "DMA segments, dropping...\n",
   8648 				    device_xname(sc->sc_dev));
   8649 				wm_dump_mbuf_chain(sc, m0);
   8650 				m_freem(m0);
   8651 				continue;
   8652 			}
   8653 			/* Short on resources, just stop for now. */
   8654 			DPRINTF(sc, WM_DEBUG_TX,
   8655 			    ("%s: TX: dmamap load failed: %d\n",
   8656 				device_xname(sc->sc_dev), error));
   8657 			break;
   8658 		}
   8659 
   8660 		segs_needed = dmamap->dm_nsegs;
   8661 
   8662 		/*
   8663 		 * Ensure we have enough descriptors free to describe
   8664 		 * the packet. Note, we always reserve one descriptor
   8665 		 * at the end of the ring due to the semantics of the
   8666 		 * TDT register, plus one more in the event we need
   8667 		 * to load offload context.
   8668 		 */
   8669 		if (segs_needed > txq->txq_free - 2) {
   8670 			/*
   8671 			 * Not enough free descriptors to transmit this
   8672 			 * packet.  We haven't committed anything yet,
   8673 			 * so just unload the DMA map, put the packet
    8674 			 * back on the queue, and punt. Notify the upper
   8675 			 * layer that there are no more slots left.
   8676 			 */
   8677 			DPRINTF(sc, WM_DEBUG_TX,
   8678 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8679 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8680 				segs_needed, txq->txq_free - 1));
   8681 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8682 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8683 			WM_Q_EVCNT_INCR(txq, txdstall);
   8684 			break;
   8685 		}
   8686 
   8687 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8688 
   8689 		DPRINTF(sc, WM_DEBUG_TX,
   8690 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8691 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8692 
   8693 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8694 
   8695 		/*
   8696 		 * Store a pointer to the packet so that we can free it
   8697 		 * later.
   8698 		 *
    8699 		 * Initially, we consider the number of descriptors the
    8700 		 * packet uses to be the number of DMA segments.  This
    8701 		 * may be incremented by 1 if we do checksum offload (a
    8702 		 * descriptor is used to set the checksum context).
   8703 		 */
   8704 		txs->txs_mbuf = m0;
   8705 		txs->txs_firstdesc = txq->txq_next;
   8706 		txs->txs_ndesc = segs_needed;
   8707 
   8708 		/* Set up offload parameters for this packet. */
   8709 		uint32_t cmdlen, fields, dcmdlen;
   8710 		if (m0->m_pkthdr.csum_flags &
   8711 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8712 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8713 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8714 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8715 			    &do_csum);
   8716 		} else {
   8717 			do_csum = false;
   8718 			cmdlen = 0;
   8719 			fields = 0;
   8720 		}
   8721 
   8722 		/* Sync the DMA map. */
   8723 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8724 		    BUS_DMASYNC_PREWRITE);
   8725 
   8726 		/* Initialize the first transmit descriptor. */
   8727 		nexttx = txq->txq_next;
   8728 		if (!do_csum) {
   8729 			/* Setup a legacy descriptor */
   8730 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8731 			    dmamap->dm_segs[0].ds_addr);
   8732 			txq->txq_descs[nexttx].wtx_cmdlen =
   8733 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8734 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8735 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8736 			if (vlan_has_tag(m0)) {
   8737 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8738 				    htole32(WTX_CMD_VLE);
   8739 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8740 				    htole16(vlan_get_tag(m0));
   8741 			} else
    8742 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8743 
   8744 			dcmdlen = 0;
   8745 		} else {
   8746 			/* Setup an advanced data descriptor */
   8747 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8748 			    htole64(dmamap->dm_segs[0].ds_addr);
   8749 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8751 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8752 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8753 			    htole32(fields);
   8754 			DPRINTF(sc, WM_DEBUG_TX,
   8755 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8756 				device_xname(sc->sc_dev), nexttx,
   8757 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8758 			DPRINTF(sc, WM_DEBUG_TX,
   8759 			    ("\t 0x%08x%08x\n", fields,
   8760 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8761 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8762 		}
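         		/*
         		 * Note (added): both forms describe the same 16-byte
         		 * descriptor slot; the advanced (DEXT) form carries the
         		 * offload fields from wm_nq_tx_offload(), while the
         		 * legacy form uses the separate wtx_fields layout.
         		 */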
   8763 
   8764 		lasttx = nexttx;
   8765 		nexttx = WM_NEXTTX(txq, nexttx);
   8766 		/*
    8767 		 * Fill in the next descriptors. The legacy and advanced
    8768 		 * formats are the same here.
   8769 		 */
   8770 		for (seg = 1; seg < dmamap->dm_nsegs;
   8771 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8772 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8773 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8774 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8775 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8776 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8777 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8778 			lasttx = nexttx;
   8779 
   8780 			DPRINTF(sc, WM_DEBUG_TX,
   8781 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8782 				device_xname(sc->sc_dev), nexttx,
   8783 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8784 				dmamap->dm_segs[seg].ds_len));
   8785 		}
   8786 
   8787 		KASSERT(lasttx != -1);
   8788 
   8789 		/*
   8790 		 * Set up the command byte on the last descriptor of
   8791 		 * the packet. If we're in the interrupt delay window,
   8792 		 * delay the interrupt.
   8793 		 */
   8794 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8795 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8796 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8797 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8798 
   8799 		txs->txs_lastdesc = lasttx;
   8800 
   8801 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8802 		    device_xname(sc->sc_dev),
   8803 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8804 
   8805 		/* Sync the descriptors we're using. */
   8806 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8807 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8808 
   8809 		/* Give the packet to the chip. */
   8810 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8811 		sent = true;
   8812 
   8813 		DPRINTF(sc, WM_DEBUG_TX,
   8814 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8815 
   8816 		DPRINTF(sc, WM_DEBUG_TX,
   8817 		    ("%s: TX: finished transmitting packet, job %d\n",
   8818 			device_xname(sc->sc_dev), txq->txq_snext));
   8819 
   8820 		/* Advance the tx pointer. */
   8821 		txq->txq_free -= txs->txs_ndesc;
   8822 		txq->txq_next = nexttx;
   8823 
   8824 		txq->txq_sfree--;
   8825 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8826 
   8827 		/* Pass the packet to any BPF listeners. */
   8828 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8829 	}
   8830 
   8831 	if (m0 != NULL) {
   8832 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8833 		WM_Q_EVCNT_INCR(txq, descdrop);
   8834 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8835 			__func__));
   8836 		m_freem(m0);
   8837 	}
   8838 
   8839 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8840 		/* No more slots; notify upper layer. */
   8841 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8842 	}
   8843 
   8844 	if (sent) {
   8845 		/* Set a watchdog timer in case the chip flakes out. */
   8846 		txq->txq_lastsent = time_uptime;
   8847 		txq->txq_sending = true;
   8848 	}
   8849 }
   8850 
   8851 static void
   8852 wm_deferred_start_locked(struct wm_txqueue *txq)
   8853 {
   8854 	struct wm_softc *sc = txq->txq_sc;
   8855 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8856 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8857 	int qid = wmq->wmq_id;
   8858 
   8859 	KASSERT(mutex_owned(txq->txq_lock));
   8860 
   8861 	if (txq->txq_stopping) {
   8862 		mutex_exit(txq->txq_lock);
   8863 		return;
   8864 	}
   8865 
   8866 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8867 		/* XXX need for ALTQ or one CPU system */
    8868 		/* XXX needed for ALTQ or single-CPU systems */
   8869 			wm_nq_start_locked(ifp);
   8870 		wm_nq_transmit_locked(ifp, txq);
   8871 	} else {
   8872 		/* XXX need for ALTQ or one CPU system */
    8873 		/* XXX needed for ALTQ or single-CPU systems */
   8874 			wm_start_locked(ifp);
   8875 		wm_transmit_locked(ifp, txq);
   8876 	}
   8877 }
   8878 
   8879 /* Interrupt */
   8880 
   8881 /*
   8882  * wm_txeof:
   8883  *
   8884  *	Helper; handle transmit interrupts.
   8885  */
   8886 static bool
   8887 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8888 {
   8889 	struct wm_softc *sc = txq->txq_sc;
   8890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8891 	struct wm_txsoft *txs;
   8892 	int count = 0;
   8893 	int i;
   8894 	uint8_t status;
   8895 	bool more = false;
   8896 
   8897 	KASSERT(mutex_owned(txq->txq_lock));
   8898 
   8899 	if (txq->txq_stopping)
   8900 		return false;
   8901 
   8902 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8903 
   8904 	/*
   8905 	 * Go through the Tx list and free mbufs for those
   8906 	 * frames which have been transmitted.
   8907 	 */
   8908 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8909 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8910 		if (limit-- == 0) {
   8911 			more = true;
   8912 			DPRINTF(sc, WM_DEBUG_TX,
   8913 			    ("%s: TX: loop limited, job %d is not processed\n",
   8914 				device_xname(sc->sc_dev), i));
   8915 			break;
   8916 		}
   8917 
   8918 		txs = &txq->txq_soft[i];
   8919 
   8920 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8921 			device_xname(sc->sc_dev), i));
   8922 
   8923 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8924 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8925 
   8926 		status =
   8927 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8928 		if ((status & WTX_ST_DD) == 0) {
   8929 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8930 			    BUS_DMASYNC_PREREAD);
   8931 			break;
   8932 		}
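         		/*
         		 * Note (added): the descriptor is synced back to
         		 * PREREAD above so that a later call re-reads the
         		 * device's eventual write-back of the DD bit.
         		 */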
   8933 
   8934 		count++;
   8935 		DPRINTF(sc, WM_DEBUG_TX,
   8936 		    ("%s: TX: job %d done: descs %d..%d\n",
   8937 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8938 		    txs->txs_lastdesc));
   8939 
   8940 		/*
   8941 		 * XXX We should probably be using the statistics
   8942 		 * XXX registers, but I don't know if they exist
   8943 		 * XXX on chips before the i82544.
   8944 		 */
   8945 
   8946 #ifdef WM_EVENT_COUNTERS
   8947 		if (status & WTX_ST_TU)
   8948 			WM_Q_EVCNT_INCR(txq, underrun);
   8949 #endif /* WM_EVENT_COUNTERS */
   8950 
   8951 		/*
    8952 		 * The documentation for the 82574 and newer says the
    8953 		 * status field has neither an EC (Excessive Collision) bit
    8954 		 * nor an LC (Late Collision) bit (both are reserved).
    8955 		 * Refer to the "PCIe GbE Controller Open Source Software
    8956 		 * Developer's Manual", the 82574 datasheet, and newer.
    8957 		 * XXX I saw the LC bit set on the I218 even though the
    8958 		 * media was full duplex, so the bit might have some other
    8959 		 * meaning (I have no documentation for it).
   8960 		 */
   8961 
   8962 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8963 		    && ((sc->sc_type < WM_T_82574)
   8964 			|| (sc->sc_type == WM_T_80003))) {
   8965 			if_statinc(ifp, if_oerrors);
   8966 			if (status & WTX_ST_LC)
   8967 				log(LOG_WARNING, "%s: late collision\n",
   8968 				    device_xname(sc->sc_dev));
   8969 			else if (status & WTX_ST_EC) {
   8970 				if_statadd(ifp, if_collisions,
   8971 				    TX_COLLISION_THRESHOLD + 1);
   8972 				log(LOG_WARNING, "%s: excessive collisions\n",
   8973 				    device_xname(sc->sc_dev));
   8974 			}
   8975 		} else
   8976 			if_statinc(ifp, if_opackets);
   8977 
   8978 		txq->txq_packets++;
   8979 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8980 
   8981 		txq->txq_free += txs->txs_ndesc;
   8982 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8983 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8984 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8985 		m_freem(txs->txs_mbuf);
   8986 		txs->txs_mbuf = NULL;
   8987 	}
   8988 
   8989 	/* Update the dirty transmit buffer pointer. */
   8990 	txq->txq_sdirty = i;
   8991 	DPRINTF(sc, WM_DEBUG_TX,
   8992 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8993 
   8994 	if (count != 0)
   8995 		rnd_add_uint32(&sc->rnd_source, count);
   8996 
   8997 	/*
   8998 	 * If there are no more pending transmissions, cancel the watchdog
   8999 	 * timer.
   9000 	 */
   9001 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9002 		txq->txq_sending = false;
   9003 
   9004 	return more;
   9005 }
   9006 
   9007 static inline uint32_t
   9008 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9009 {
   9010 	struct wm_softc *sc = rxq->rxq_sc;
   9011 
   9012 	if (sc->sc_type == WM_T_82574)
   9013 		return EXTRXC_STATUS(
   9014 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9015 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9016 		return NQRXC_STATUS(
   9017 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9018 	else
   9019 		return rxq->rxq_descs[idx].wrx_status;
   9020 }
   9021 
   9022 static inline uint32_t
   9023 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9024 {
   9025 	struct wm_softc *sc = rxq->rxq_sc;
   9026 
   9027 	if (sc->sc_type == WM_T_82574)
   9028 		return EXTRXC_ERROR(
   9029 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9030 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9031 		return NQRXC_ERROR(
   9032 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9033 	else
   9034 		return rxq->rxq_descs[idx].wrx_errors;
   9035 }
   9036 
   9037 static inline uint16_t
   9038 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9039 {
   9040 	struct wm_softc *sc = rxq->rxq_sc;
   9041 
   9042 	if (sc->sc_type == WM_T_82574)
   9043 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9044 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9045 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9046 	else
   9047 		return rxq->rxq_descs[idx].wrx_special;
   9048 }
   9049 
   9050 static inline int
   9051 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9052 {
   9053 	struct wm_softc *sc = rxq->rxq_sc;
   9054 
   9055 	if (sc->sc_type == WM_T_82574)
   9056 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9057 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9058 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9059 	else
   9060 		return rxq->rxq_descs[idx].wrx_len;
   9061 }
   9062 
   9063 #ifdef WM_DEBUG
   9064 static inline uint32_t
   9065 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9066 {
   9067 	struct wm_softc *sc = rxq->rxq_sc;
   9068 
   9069 	if (sc->sc_type == WM_T_82574)
   9070 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9071 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9072 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9073 	else
   9074 		return 0;
   9075 }
   9076 
   9077 static inline uint8_t
   9078 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9079 {
   9080 	struct wm_softc *sc = rxq->rxq_sc;
   9081 
   9082 	if (sc->sc_type == WM_T_82574)
   9083 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9084 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9085 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9086 	else
   9087 		return 0;
   9088 }
   9089 #endif /* WM_DEBUG */
   9090 
   9091 static inline bool
   9092 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9093     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9094 {
   9095 
   9096 	if (sc->sc_type == WM_T_82574)
   9097 		return (status & ext_bit) != 0;
   9098 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9099 		return (status & nq_bit) != 0;
   9100 	else
   9101 		return (status & legacy_bit) != 0;
   9102 }
   9103 
   9104 static inline bool
   9105 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9106     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9107 {
   9108 
   9109 	if (sc->sc_type == WM_T_82574)
   9110 		return (error & ext_bit) != 0;
   9111 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9112 		return (error & nq_bit) != 0;
   9113 	else
   9114 		return (error & legacy_bit) != 0;
   9115 }
   9116 
   9117 static inline bool
   9118 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9119 {
   9120 
   9121 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9122 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9123 		return true;
   9124 	else
   9125 		return false;
   9126 }
   9127 
   9128 static inline bool
   9129 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9130 {
   9131 	struct wm_softc *sc = rxq->rxq_sc;
   9132 
   9133 	/* XXX missing error bit for newqueue? */
   9134 	if (wm_rxdesc_is_set_error(sc, errors,
   9135 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9136 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9137 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9138 		NQRXC_ERROR_RXE)) {
   9139 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9140 		    EXTRXC_ERROR_SE, 0))
   9141 			log(LOG_WARNING, "%s: symbol error\n",
   9142 			    device_xname(sc->sc_dev));
   9143 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9144 		    EXTRXC_ERROR_SEQ, 0))
   9145 			log(LOG_WARNING, "%s: receive sequence error\n",
   9146 			    device_xname(sc->sc_dev));
   9147 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9148 		    EXTRXC_ERROR_CE, 0))
   9149 			log(LOG_WARNING, "%s: CRC error\n",
   9150 			    device_xname(sc->sc_dev));
   9151 		return true;
   9152 	}
   9153 
   9154 	return false;
   9155 }
   9156 
   9157 static inline bool
   9158 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9159 {
   9160 	struct wm_softc *sc = rxq->rxq_sc;
   9161 
   9162 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9163 		NQRXC_STATUS_DD)) {
   9164 		/* We have processed all of the receive descriptors. */
   9165 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9166 		return false;
   9167 	}
   9168 
   9169 	return true;
   9170 }
   9171 
   9172 static inline bool
   9173 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9174     uint16_t vlantag, struct mbuf *m)
   9175 {
   9176 
   9177 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9178 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9179 		vlan_set_tag(m, le16toh(vlantag));
   9180 	}
   9181 
   9182 	return true;
   9183 }
   9184 
   9185 static inline void
   9186 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9187     uint32_t errors, struct mbuf *m)
   9188 {
   9189 	struct wm_softc *sc = rxq->rxq_sc;
   9190 
   9191 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9192 		if (wm_rxdesc_is_set_status(sc, status,
   9193 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9194 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9195 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9196 			if (wm_rxdesc_is_set_error(sc, errors,
   9197 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9198 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9199 		}
   9200 		if (wm_rxdesc_is_set_status(sc, status,
   9201 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9202 			/*
   9203 			 * Note: we don't know if this was TCP or UDP,
   9204 			 * so we just set both bits, and expect the
   9205 			 * upper layers to deal.
   9206 			 */
   9207 			WM_Q_EVCNT_INCR(rxq, tusum);
   9208 			m->m_pkthdr.csum_flags |=
   9209 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9210 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9211 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9212 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9213 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9214 		}
   9215 	}
   9216 }
   9217 
   9218 /*
   9219  * wm_rxeof:
   9220  *
   9221  *	Helper; handle receive interrupts.
   9222  */
   9223 static bool
   9224 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9225 {
   9226 	struct wm_softc *sc = rxq->rxq_sc;
   9227 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9228 	struct wm_rxsoft *rxs;
   9229 	struct mbuf *m;
   9230 	int i, len;
   9231 	int count = 0;
   9232 	uint32_t status, errors;
   9233 	uint16_t vlantag;
   9234 	bool more = false;
   9235 
   9236 	KASSERT(mutex_owned(rxq->rxq_lock));
   9237 
   9238 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9239 		if (limit-- == 0) {
   9240 			more = true;
   9241 			DPRINTF(sc, WM_DEBUG_RX,
   9242 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9243 				device_xname(sc->sc_dev), i));
   9244 			break;
   9245 		}
   9246 
   9247 		rxs = &rxq->rxq_soft[i];
   9248 
   9249 		DPRINTF(sc, WM_DEBUG_RX,
   9250 		    ("%s: RX: checking descriptor %d\n",
   9251 			device_xname(sc->sc_dev), i));
   9252 		wm_cdrxsync(rxq, i,
   9253 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9254 
   9255 		status = wm_rxdesc_get_status(rxq, i);
   9256 		errors = wm_rxdesc_get_errors(rxq, i);
   9257 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9258 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9259 #ifdef WM_DEBUG
   9260 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9261 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9262 #endif
   9263 
   9264 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9265 			break;
   9266 		}
   9267 
   9268 		count++;
   9269 		if (__predict_false(rxq->rxq_discard)) {
   9270 			DPRINTF(sc, WM_DEBUG_RX,
   9271 			    ("%s: RX: discarding contents of descriptor %d\n",
   9272 				device_xname(sc->sc_dev), i));
   9273 			wm_init_rxdesc(rxq, i);
   9274 			if (wm_rxdesc_is_eop(rxq, status)) {
   9275 				/* Reset our state. */
   9276 				DPRINTF(sc, WM_DEBUG_RX,
   9277 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9278 					device_xname(sc->sc_dev)));
   9279 				rxq->rxq_discard = 0;
   9280 			}
   9281 			continue;
   9282 		}
   9283 
   9284 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9285 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9286 
   9287 		m = rxs->rxs_mbuf;
   9288 
   9289 		/*
   9290 		 * Add a new receive buffer to the ring, unless of
   9291 		 * course the length is zero. Treat the latter as a
   9292 		 * failed mapping.
   9293 		 */
   9294 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9295 			/*
   9296 			 * Failed, throw away what we've done so
   9297 			 * far, and discard the rest of the packet.
   9298 			 */
   9299 			if_statinc(ifp, if_ierrors);
   9300 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9301 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9302 			wm_init_rxdesc(rxq, i);
   9303 			if (!wm_rxdesc_is_eop(rxq, status))
   9304 				rxq->rxq_discard = 1;
   9305 			if (rxq->rxq_head != NULL)
   9306 				m_freem(rxq->rxq_head);
   9307 			WM_RXCHAIN_RESET(rxq);
   9308 			DPRINTF(sc, WM_DEBUG_RX,
   9309 			    ("%s: RX: Rx buffer allocation failed, "
   9310 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9311 				rxq->rxq_discard ? " (discard)" : ""));
   9312 			continue;
   9313 		}
   9314 
   9315 		m->m_len = len;
   9316 		rxq->rxq_len += len;
   9317 		DPRINTF(sc, WM_DEBUG_RX,
   9318 		    ("%s: RX: buffer at %p len %d\n",
   9319 			device_xname(sc->sc_dev), m->m_data, len));
   9320 
   9321 		/* If this is not the end of the packet, keep looking. */
   9322 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9323 			WM_RXCHAIN_LINK(rxq, m);
   9324 			DPRINTF(sc, WM_DEBUG_RX,
   9325 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9326 				device_xname(sc->sc_dev), rxq->rxq_len));
   9327 			continue;
   9328 		}
   9329 
   9330 		/*
    9331 		 * Okay, we have the entire packet now. The chip is configured
    9332 		 * to include the FCS (not all chips can be configured to strip
    9333 		 * it), so we need to trim it, except on the I35[04] and
    9334 		 * I21[01]: those chips have an erratum whereby the RCTL_SECRC
    9335 		 * bit in the RCTL register is always set, so the FCS is
    9336 		 * already stripped and we don't trim it. PCH2 and newer chips
    9337 		 * also exclude the FCS when jumbo frames are used, to work
    9338 		 * around an erratum. We may need to adjust the length of the
    9339 		 * previous mbuf in the chain if the current mbuf is too short.
   9340 		 */
   9341 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9342 			if (m->m_len < ETHER_CRC_LEN) {
   9343 				rxq->rxq_tail->m_len
   9344 				    -= (ETHER_CRC_LEN - m->m_len);
   9345 				m->m_len = 0;
   9346 			} else
   9347 				m->m_len -= ETHER_CRC_LEN;
   9348 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9349 		} else
   9350 			len = rxq->rxq_len;
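         		/*
         		 * Worked example (added): if only 2 of the 4 FCS bytes
         		 * landed in the final mbuf, that mbuf is emptied
         		 * (m_len = 0) and the preceding mbuf is shortened by
         		 * the other 2 bytes, so the chain drops exactly
         		 * ETHER_CRC_LEN bytes in total.
         		 */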
   9351 
   9352 		WM_RXCHAIN_LINK(rxq, m);
   9353 
   9354 		*rxq->rxq_tailp = NULL;
   9355 		m = rxq->rxq_head;
   9356 
   9357 		WM_RXCHAIN_RESET(rxq);
   9358 
   9359 		DPRINTF(sc, WM_DEBUG_RX,
   9360 		    ("%s: RX: have entire packet, len -> %d\n",
   9361 			device_xname(sc->sc_dev), len));
   9362 
   9363 		/* If an error occurred, update stats and drop the packet. */
   9364 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9365 			m_freem(m);
   9366 			continue;
   9367 		}
   9368 
   9369 		/* No errors.  Receive the packet. */
   9370 		m_set_rcvif(m, ifp);
   9371 		m->m_pkthdr.len = len;
    9372 		/*
    9373 		 * TODO
    9374 		 * The rsshash and rsstype should be saved in this mbuf.
    9375 		 */
   9376 		DPRINTF(sc, WM_DEBUG_RX,
   9377 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9378 			device_xname(sc->sc_dev), rsstype, rsshash));
   9379 
   9380 		/*
   9381 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9382 		 * for us.  Associate the tag with the packet.
   9383 		 */
   9384 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9385 			continue;
   9386 
   9387 		/* Set up checksum info for this packet. */
   9388 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9389 
   9390 		rxq->rxq_packets++;
   9391 		rxq->rxq_bytes += len;
   9392 		/* Pass it on. */
   9393 		if_percpuq_enqueue(sc->sc_ipq, m);
   9394 
   9395 		if (rxq->rxq_stopping)
   9396 			break;
   9397 	}
   9398 	rxq->rxq_ptr = i;
   9399 
   9400 	if (count != 0)
   9401 		rnd_add_uint32(&sc->rnd_source, count);
   9402 
   9403 	DPRINTF(sc, WM_DEBUG_RX,
   9404 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9405 
   9406 	return more;
   9407 }
   9408 
   9409 /*
   9410  * wm_linkintr_gmii:
   9411  *
   9412  *	Helper; handle link interrupts for GMII.
   9413  */
   9414 static void
   9415 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9416 {
   9417 	device_t dev = sc->sc_dev;
   9418 	uint32_t status, reg;
   9419 	bool link;
   9420 	int rv;
   9421 
   9422 	KASSERT(WM_CORE_LOCKED(sc));
   9423 
   9424 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9425 		__func__));
   9426 
   9427 	if ((icr & ICR_LSC) == 0) {
   9428 		if (icr & ICR_RXSEQ)
   9429 			DPRINTF(sc, WM_DEBUG_LINK,
   9430 			    ("%s: LINK Receive sequence error\n",
   9431 				device_xname(dev)));
   9432 		return;
   9433 	}
   9434 
   9435 	/* Link status changed */
   9436 	status = CSR_READ(sc, WMREG_STATUS);
   9437 	link = status & STATUS_LU;
   9438 	if (link) {
   9439 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9440 			device_xname(dev),
   9441 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9442 		if (wm_phy_need_linkdown_discard(sc))
   9443 			wm_clear_linkdown_discard(sc);
   9444 	} else {
   9445 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9446 			device_xname(dev)));
   9447 		if (wm_phy_need_linkdown_discard(sc))
   9448 			wm_set_linkdown_discard(sc);
   9449 	}
   9450 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9451 		wm_gig_downshift_workaround_ich8lan(sc);
   9452 
   9453 	if ((sc->sc_type == WM_T_ICH8)
   9454 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9455 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9456 	}
   9457 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9458 		device_xname(dev)));
   9459 	mii_pollstat(&sc->sc_mii);
   9460 	if (sc->sc_type == WM_T_82543) {
   9461 		int miistatus, active;
   9462 
   9463 		/*
   9464 		 * With 82543, we need to force speed and
   9465 		 * duplex on the MAC equal to what the PHY
   9466 		 * speed and duplex configuration is.
   9467 		 */
   9468 		miistatus = sc->sc_mii.mii_media_status;
   9469 
   9470 		if (miistatus & IFM_ACTIVE) {
   9471 			active = sc->sc_mii.mii_media_active;
   9472 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9473 			switch (IFM_SUBTYPE(active)) {
   9474 			case IFM_10_T:
   9475 				sc->sc_ctrl |= CTRL_SPEED_10;
   9476 				break;
   9477 			case IFM_100_TX:
   9478 				sc->sc_ctrl |= CTRL_SPEED_100;
   9479 				break;
   9480 			case IFM_1000_T:
   9481 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9482 				break;
   9483 			default:
    9484 				/*
    9485 				 * Fiber?
    9486 				 * Should not enter here.
    9487 				 */
   9488 				device_printf(dev, "unknown media (%x)\n",
   9489 				    active);
   9490 				break;
   9491 			}
   9492 			if (active & IFM_FDX)
   9493 				sc->sc_ctrl |= CTRL_FD;
   9494 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9495 		}
   9496 	} else if (sc->sc_type == WM_T_PCH) {
   9497 		wm_k1_gig_workaround_hv(sc,
   9498 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9499 	}
   9500 
    9501 	/*
    9502 	 * When connected at 10Mbps half-duplex, some parts are excessively
    9503 	 * aggressive, resulting in many collisions. To avoid this, increase
    9504 	 * the IPG and reduce the Rx latency in the PHY.
    9505 	 */
   9506 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9507 	    && link) {
   9508 		uint32_t tipg_reg;
   9509 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9510 		bool fdx;
   9511 		uint16_t emi_addr, emi_val;
   9512 
   9513 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9514 		tipg_reg &= ~TIPG_IPGT_MASK;
   9515 		fdx = status & STATUS_FD;
   9516 
   9517 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9518 			tipg_reg |= 0xff;
   9519 			/* Reduce Rx latency in analog PHY */
   9520 			emi_val = 0;
   9521 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9522 		    fdx && speed != STATUS_SPEED_1000) {
   9523 			tipg_reg |= 0xc;
   9524 			emi_val = 1;
   9525 		} else {
   9526 			/* Roll back the default values */
   9527 			tipg_reg |= 0x08;
   9528 			emi_val = 1;
   9529 		}
   9530 
   9531 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9532 
   9533 		rv = sc->phy.acquire(sc);
   9534 		if (rv)
   9535 			return;
   9536 
   9537 		if (sc->sc_type == WM_T_PCH2)
   9538 			emi_addr = I82579_RX_CONFIG;
   9539 		else
   9540 			emi_addr = I217_RX_CONFIG;
   9541 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9542 
   9543 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9544 			uint16_t phy_reg;
   9545 
   9546 			sc->phy.readreg_locked(dev, 2,
   9547 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9548 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9549 			if (speed == STATUS_SPEED_100
   9550 			    || speed == STATUS_SPEED_10)
   9551 				phy_reg |= 0x3e8;
   9552 			else
   9553 				phy_reg |= 0xfa;
   9554 			sc->phy.writereg_locked(dev, 2,
   9555 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9556 
   9557 			if (speed == STATUS_SPEED_1000) {
   9558 				sc->phy.readreg_locked(dev, 2,
   9559 				    HV_PM_CTRL, &phy_reg);
   9560 
   9561 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9562 
   9563 				sc->phy.writereg_locked(dev, 2,
   9564 				    HV_PM_CTRL, phy_reg);
   9565 			}
   9566 		}
   9567 		sc->phy.release(sc);
   9568 
   9569 		if (rv)
   9570 			return;
   9571 
   9572 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9573 			uint16_t data, ptr_gap;
   9574 
   9575 			if (speed == STATUS_SPEED_1000) {
   9576 				rv = sc->phy.acquire(sc);
   9577 				if (rv)
   9578 					return;
   9579 
   9580 				rv = sc->phy.readreg_locked(dev, 2,
   9581 				    I82579_UNKNOWN1, &data);
   9582 				if (rv) {
   9583 					sc->phy.release(sc);
   9584 					return;
   9585 				}
   9586 
   9587 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9588 				if (ptr_gap < 0x18) {
   9589 					data &= ~(0x3ff << 2);
   9590 					data |= (0x18 << 2);
   9591 					rv = sc->phy.writereg_locked(dev,
   9592 					    2, I82579_UNKNOWN1, data);
   9593 				}
   9594 				sc->phy.release(sc);
   9595 				if (rv)
   9596 					return;
   9597 			} else {
   9598 				rv = sc->phy.acquire(sc);
   9599 				if (rv)
   9600 					return;
   9601 
   9602 				rv = sc->phy.writereg_locked(dev, 2,
   9603 				    I82579_UNKNOWN1, 0xc023);
   9604 				sc->phy.release(sc);
   9605 				if (rv)
   9606 					return;
   9607 
   9608 			}
   9609 		}
   9610 	}
   9611 
    9612 	/*
    9613 	 * I217 packet loss issue:
    9614 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9615 	 * on power-up.
    9616 	 * Set the Beacon Duration for I217 to 8 usec.
    9617 	 */
   9618 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9619 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9620 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9621 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9622 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9623 	}
   9624 
   9625 	/* Work-around I218 hang issue */
   9626 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9627 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9628 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9629 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9630 		wm_k1_workaround_lpt_lp(sc, link);
   9631 
   9632 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9633 		/*
   9634 		 * Set platform power management values for Latency
   9635 		 * Tolerance Reporting (LTR)
   9636 		 */
   9637 		wm_platform_pm_pch_lpt(sc,
   9638 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9639 	}
   9640 
   9641 	/* Clear link partner's EEE ability */
   9642 	sc->eee_lp_ability = 0;
   9643 
   9644 	/* FEXTNVM6 K1-off workaround */
   9645 	if (sc->sc_type == WM_T_PCH_SPT) {
   9646 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9647 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9648 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9649 		else
   9650 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9651 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9652 	}
   9653 
   9654 	if (!link)
   9655 		return;
   9656 
   9657 	switch (sc->sc_type) {
   9658 	case WM_T_PCH2:
   9659 		wm_k1_workaround_lv(sc);
   9660 		/* FALLTHROUGH */
   9661 	case WM_T_PCH:
   9662 		if (sc->sc_phytype == WMPHY_82578)
   9663 			wm_link_stall_workaround_hv(sc);
   9664 		break;
   9665 	default:
   9666 		break;
   9667 	}
   9668 
   9669 	/* Enable/Disable EEE after link up */
   9670 	if (sc->sc_phytype > WMPHY_82579)
   9671 		wm_set_eee_pchlan(sc);
   9672 }
   9673 
   9674 /*
   9675  * wm_linkintr_tbi:
   9676  *
   9677  *	Helper; handle link interrupts for TBI mode.
   9678  */
   9679 static void
   9680 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9681 {
   9682 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9683 	uint32_t status;
   9684 
   9685 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9686 		__func__));
   9687 
   9688 	status = CSR_READ(sc, WMREG_STATUS);
   9689 	if (icr & ICR_LSC) {
   9690 		wm_check_for_link(sc);
   9691 		if (status & STATUS_LU) {
   9692 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9693 				device_xname(sc->sc_dev),
   9694 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9695 			/*
   9696 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9697 			 * so we should update sc->sc_ctrl
   9698 			 */
   9699 
   9700 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9701 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9702 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9703 			if (status & STATUS_FD)
   9704 				sc->sc_tctl |=
   9705 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9706 			else
   9707 				sc->sc_tctl |=
   9708 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9709 			if (sc->sc_ctrl & CTRL_TFCE)
   9710 				sc->sc_fcrtl |= FCRTL_XONE;
   9711 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9712 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9713 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9714 			sc->sc_tbi_linkup = 1;
   9715 			if_link_state_change(ifp, LINK_STATE_UP);
   9716 		} else {
   9717 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9718 				device_xname(sc->sc_dev)));
   9719 			sc->sc_tbi_linkup = 0;
   9720 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9721 		}
   9722 		/* Update LED */
   9723 		wm_tbi_serdes_set_linkled(sc);
   9724 	} else if (icr & ICR_RXSEQ)
   9725 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9726 			device_xname(sc->sc_dev)));
   9727 }
   9728 
   9729 /*
   9730  * wm_linkintr_serdes:
   9731  *
    9732  *	Helper; handle link interrupts for SERDES mode.
   9733  */
   9734 static void
   9735 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9736 {
   9737 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9738 	struct mii_data *mii = &sc->sc_mii;
   9739 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9740 	uint32_t pcs_adv, pcs_lpab, reg;
   9741 
   9742 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9743 		__func__));
   9744 
   9745 	if (icr & ICR_LSC) {
   9746 		/* Check PCS */
   9747 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9748 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9749 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9750 				device_xname(sc->sc_dev)));
   9751 			mii->mii_media_status |= IFM_ACTIVE;
   9752 			sc->sc_tbi_linkup = 1;
   9753 			if_link_state_change(ifp, LINK_STATE_UP);
   9754 		} else {
   9755 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9756 				device_xname(sc->sc_dev)));
   9757 			mii->mii_media_status |= IFM_NONE;
   9758 			sc->sc_tbi_linkup = 0;
   9759 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9760 			wm_tbi_serdes_set_linkled(sc);
   9761 			return;
   9762 		}
   9763 		mii->mii_media_active |= IFM_1000_SX;
   9764 		if ((reg & PCS_LSTS_FDX) != 0)
   9765 			mii->mii_media_active |= IFM_FDX;
   9766 		else
   9767 			mii->mii_media_active |= IFM_HDX;
   9768 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9769 			/* Check flow */
   9770 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9771 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9772 				DPRINTF(sc, WM_DEBUG_LINK,
   9773 				    ("XXX LINKOK but not ACOMP\n"));
   9774 				return;
   9775 			}
   9776 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9777 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9778 			DPRINTF(sc, WM_DEBUG_LINK,
   9779 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
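         			/*
         			 * Pause resolution from the advertised abilities
         			 * (cf. IEEE 802.3 Annex 28B), matching the chain
         			 * below:
         			 *	both sides SYM			-> TX and RX pause
         			 *	local ASYM only, partner both	-> TX pause only
         			 *	local both, partner ASYM only	-> RX pause only
         			 */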
   9780 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9781 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9782 				mii->mii_media_active |= IFM_FLOW
   9783 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9784 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9785 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9786 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9787 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9788 				mii->mii_media_active |= IFM_FLOW
   9789 				    | IFM_ETH_TXPAUSE;
   9790 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9791 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9792 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9793 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9794 				mii->mii_media_active |= IFM_FLOW
   9795 				    | IFM_ETH_RXPAUSE;
   9796 		}
   9797 		/* Update LED */
   9798 		wm_tbi_serdes_set_linkled(sc);
   9799 	} else
   9800 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9801 		    device_xname(sc->sc_dev)));
   9802 }
   9803 
   9804 /*
   9805  * wm_linkintr:
   9806  *
   9807  *	Helper; handle link interrupts.
   9808  */
   9809 static void
   9810 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9811 {
   9812 
   9813 	KASSERT(WM_CORE_LOCKED(sc));
   9814 
   9815 	if (sc->sc_flags & WM_F_HAS_MII)
   9816 		wm_linkintr_gmii(sc, icr);
   9817 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9818 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9819 		wm_linkintr_serdes(sc, icr);
   9820 	else
   9821 		wm_linkintr_tbi(sc, icr);
   9822 }
   9823 
   9824 
   9825 static inline void
   9826 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9827 {
   9828 
   9829 	if (wmq->wmq_txrx_use_workqueue)
   9830 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9831 	else
   9832 		softint_schedule(wmq->wmq_si);
   9833 }
   9834 
   9835 static inline void
   9836 wm_legacy_intr_disable(struct wm_softc *sc)
   9837 {
   9838 
   9839 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9840 }
   9841 
   9842 static inline void
   9843 wm_legacy_intr_enable(struct wm_softc *sc)
   9844 {
   9845 
   9846 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9847 }
   9848 
   9849 /*
   9850  * wm_intr_legacy:
   9851  *
   9852  *	Interrupt service routine for INTx and MSI.
   9853  */
   9854 static int
   9855 wm_intr_legacy(void *arg)
   9856 {
   9857 	struct wm_softc *sc = arg;
   9858 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9859 	struct wm_queue *wmq = &sc->sc_queue[0];
   9860 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9861 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9862 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9863 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9864 	uint32_t icr, rndval = 0;
   9865 	bool more = false;
   9866 
   9867 	icr = CSR_READ(sc, WMREG_ICR);
   9868 	if ((icr & sc->sc_icr) == 0)
   9869 		return 0;
   9870 
   9871 	DPRINTF(sc, WM_DEBUG_TX,
    9872 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9873 	if (rndval == 0)
   9874 		rndval = icr;
   9875 
   9876 	mutex_enter(rxq->rxq_lock);
   9877 
   9878 	if (rxq->rxq_stopping) {
   9879 		mutex_exit(rxq->rxq_lock);
   9880 		return 1;
   9881 	}
   9882 
   9883 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9884 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9885 		DPRINTF(sc, WM_DEBUG_RX,
   9886 		    ("%s: RX: got Rx intr 0x%08x\n",
   9887 			device_xname(sc->sc_dev),
   9888 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   9889 		WM_Q_EVCNT_INCR(rxq, intr);
   9890 	}
   9891 #endif
    9892 	/*
    9893 	 * wm_rxeof() does *not* call upper layer functions directly,
    9894 	 * as if_percpuq_enqueue() just calls softint_schedule(),
    9895 	 * so we can call wm_rxeof() in interrupt context.
    9896 	 */
   9897 	more = wm_rxeof(rxq, rxlimit);
   9898 
   9899 	mutex_exit(rxq->rxq_lock);
   9900 	mutex_enter(txq->txq_lock);
   9901 
   9902 	if (txq->txq_stopping) {
   9903 		mutex_exit(txq->txq_lock);
   9904 		return 1;
   9905 	}
   9906 
   9907 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9908 	if (icr & ICR_TXDW) {
   9909 		DPRINTF(sc, WM_DEBUG_TX,
   9910 		    ("%s: TX: got TXDW interrupt\n",
   9911 			device_xname(sc->sc_dev)));
   9912 		WM_Q_EVCNT_INCR(txq, txdw);
   9913 	}
   9914 #endif
   9915 	more |= wm_txeof(txq, txlimit);
   9916 	if (!IF_IS_EMPTY(&ifp->if_snd))
   9917 		more = true;
   9918 
   9919 	mutex_exit(txq->txq_lock);
   9920 	WM_CORE_LOCK(sc);
   9921 
   9922 	if (sc->sc_core_stopping) {
   9923 		WM_CORE_UNLOCK(sc);
   9924 		return 1;
   9925 	}
   9926 
   9927 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9928 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9929 		wm_linkintr(sc, icr);
   9930 	}
   9931 	if ((icr & ICR_GPI(0)) != 0)
   9932 		device_printf(sc->sc_dev, "got module interrupt\n");
   9933 
   9934 	WM_CORE_UNLOCK(sc);
   9935 
   9936 	if (icr & ICR_RXO) {
   9937 #if defined(WM_DEBUG)
   9938 		log(LOG_WARNING, "%s: Receive overrun\n",
   9939 		    device_xname(sc->sc_dev));
   9940 #endif /* defined(WM_DEBUG) */
   9941 	}
   9942 
   9943 	rnd_add_uint32(&sc->rnd_source, rndval);
   9944 
   9945 	if (more) {
   9946 		/* Try to get more packets going. */
   9947 		wm_legacy_intr_disable(sc);
   9948 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9949 		wm_sched_handle_queue(sc, wmq);
   9950 	}
   9951 
   9952 	return 1;
   9953 }
   9954 
   9955 static inline void
   9956 wm_txrxintr_disable(struct wm_queue *wmq)
   9957 {
   9958 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9959 
   9960 	if (__predict_false(!wm_is_using_msix(sc))) {
   9961 		return wm_legacy_intr_disable(sc);
   9962 	}
   9963 
   9964 	if (sc->sc_type == WM_T_82574)
   9965 		CSR_WRITE(sc, WMREG_IMC,
   9966 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9967 	else if (sc->sc_type == WM_T_82575)
   9968 		CSR_WRITE(sc, WMREG_EIMC,
   9969 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9970 	else
   9971 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9972 }
   9973 
   9974 static inline void
   9975 wm_txrxintr_enable(struct wm_queue *wmq)
   9976 {
   9977 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9978 
   9979 	wm_itrs_calculate(sc, wmq);
   9980 
   9981 	if (__predict_false(!wm_is_using_msix(sc))) {
   9982 		return wm_legacy_intr_enable(sc);
   9983 	}
   9984 
    9985 	/*
    9986 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    9987 	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
    9988 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9989 	 * while its wm_handle_queue(wmq) is running.
    9990 	 */
   9991 	if (sc->sc_type == WM_T_82574)
   9992 		CSR_WRITE(sc, WMREG_IMS,
   9993 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9994 	else if (sc->sc_type == WM_T_82575)
   9995 		CSR_WRITE(sc, WMREG_EIMS,
   9996 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9997 	else
   9998 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9999 }
   10000 
   10001 static int
   10002 wm_txrxintr_msix(void *arg)
   10003 {
   10004 	struct wm_queue *wmq = arg;
   10005 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10006 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10007 	struct wm_softc *sc = txq->txq_sc;
   10008 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10009 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10010 	bool txmore;
   10011 	bool rxmore;
   10012 
   10013 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10014 
   10015 	DPRINTF(sc, WM_DEBUG_TX,
   10016 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10017 
   10018 	wm_txrxintr_disable(wmq);
   10019 
   10020 	mutex_enter(txq->txq_lock);
   10021 
   10022 	if (txq->txq_stopping) {
   10023 		mutex_exit(txq->txq_lock);
   10024 		return 1;
   10025 	}
   10026 
   10027 	WM_Q_EVCNT_INCR(txq, txdw);
   10028 	txmore = wm_txeof(txq, txlimit);
   10029 	/* wm_deferred start() is done in wm_handle_queue(). */
   10030 	mutex_exit(txq->txq_lock);
   10031 
   10032 	DPRINTF(sc, WM_DEBUG_RX,
   10033 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10034 	mutex_enter(rxq->rxq_lock);
   10035 
   10036 	if (rxq->rxq_stopping) {
   10037 		mutex_exit(rxq->rxq_lock);
   10038 		return 1;
   10039 	}
   10040 
   10041 	WM_Q_EVCNT_INCR(rxq, intr);
   10042 	rxmore = wm_rxeof(rxq, rxlimit);
   10043 	mutex_exit(rxq->rxq_lock);
   10044 
   10045 	wm_itrs_writereg(sc, wmq);
   10046 
   10047 	if (txmore || rxmore) {
   10048 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10049 		wm_sched_handle_queue(sc, wmq);
   10050 	} else
   10051 		wm_txrxintr_enable(wmq);
   10052 
   10053 	return 1;
   10054 }
   10055 
   10056 static void
   10057 wm_handle_queue(void *arg)
   10058 {
   10059 	struct wm_queue *wmq = arg;
   10060 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10061 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10062 	struct wm_softc *sc = txq->txq_sc;
   10063 	u_int txlimit = sc->sc_tx_process_limit;
   10064 	u_int rxlimit = sc->sc_rx_process_limit;
   10065 	bool txmore;
   10066 	bool rxmore;
   10067 
   10068 	mutex_enter(txq->txq_lock);
   10069 	if (txq->txq_stopping) {
   10070 		mutex_exit(txq->txq_lock);
   10071 		return;
   10072 	}
   10073 	txmore = wm_txeof(txq, txlimit);
   10074 	wm_deferred_start_locked(txq);
   10075 	mutex_exit(txq->txq_lock);
   10076 
   10077 	mutex_enter(rxq->rxq_lock);
   10078 	if (rxq->rxq_stopping) {
   10079 		mutex_exit(rxq->rxq_lock);
   10080 		return;
   10081 	}
   10082 	WM_Q_EVCNT_INCR(rxq, defer);
   10083 	rxmore = wm_rxeof(rxq, rxlimit);
   10084 	mutex_exit(rxq->rxq_lock);
   10085 
   10086 	if (txmore || rxmore) {
   10087 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10088 		wm_sched_handle_queue(sc, wmq);
   10089 	} else
   10090 		wm_txrxintr_enable(wmq);
   10091 }
   10092 
   10093 static void
   10094 wm_handle_queue_work(struct work *wk, void *context)
   10095 {
   10096 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10097 
    10098 	/*
    10099 	 * An "enqueued" flag is not required here: the queue interrupt is
    10100 	 * re-enabled only after wm_handle_queue() has run, so this work
    10101 	 * cannot be enqueued a second time while it is still pending.
    10102 	 */
   10101 	wm_handle_queue(wmq);
   10102 }
   10103 
   10104 /*
   10105  * wm_linkintr_msix:
   10106  *
   10107  *	Interrupt service routine for link status change for MSI-X.
   10108  */
   10109 static int
   10110 wm_linkintr_msix(void *arg)
   10111 {
   10112 	struct wm_softc *sc = arg;
   10113 	uint32_t reg;
   10114 	bool has_rxo;
   10115 
   10116 	reg = CSR_READ(sc, WMREG_ICR);
   10117 	WM_CORE_LOCK(sc);
   10118 	DPRINTF(sc, WM_DEBUG_LINK,
   10119 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10120 		device_xname(sc->sc_dev), reg));
   10121 
   10122 	if (sc->sc_core_stopping)
   10123 		goto out;
   10124 
   10125 	if ((reg & ICR_LSC) != 0) {
   10126 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10127 		wm_linkintr(sc, ICR_LSC);
   10128 	}
   10129 	if ((reg & ICR_GPI(0)) != 0)
   10130 		device_printf(sc->sc_dev, "got module interrupt\n");
   10131 
    10132 	/*
    10133 	 * XXX 82574 MSI-X mode workaround
    10134 	 *
    10135 	 * The 82574 in MSI-X mode signals receive overrun (RXO) on the
    10136 	 * ICR_OTHER MSI-X vector and raises neither ICR_RXQ(0) nor
    10137 	 * ICR_RXQ(1). So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts
    10138 	 * ourselves by writing WMREG_ICS, to process received packets.
    10139 	 */
   10140 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10141 #if defined(WM_DEBUG)
   10142 		log(LOG_WARNING, "%s: Receive overrun\n",
   10143 		    device_xname(sc->sc_dev));
   10144 #endif /* defined(WM_DEBUG) */
   10145 
   10146 		has_rxo = true;
    10147 		/*
    10148 		 * The RXO interrupt arrives at a very high rate when receive
    10149 		 * traffic is heavy, so we use polling mode for ICR_OTHER, as
    10150 		 * for the Tx/Rx interrupts. ICR_OTHER is re-enabled at the end
    10151 		 * of wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
    10152 		 * and ICR_RXQ(1) interrupts.
    10153 		 */
   10154 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10155 
   10156 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10157 	}
    10158 
   10161 out:
   10162 	WM_CORE_UNLOCK(sc);
   10163 
   10164 	if (sc->sc_type == WM_T_82574) {
   10165 		if (!has_rxo)
   10166 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10167 		else
   10168 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10169 	} else if (sc->sc_type == WM_T_82575)
   10170 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10171 	else
   10172 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10173 
   10174 	return 1;
   10175 }
   10176 
   10177 /*
   10178  * Media related.
   10179  * GMII, SGMII, TBI (and SERDES)
   10180  */
   10181 
   10182 /* Common */
   10183 
   10184 /*
   10185  * wm_tbi_serdes_set_linkled:
   10186  *
   10187  *	Update the link LED on TBI and SERDES devices.
   10188  */
   10189 static void
   10190 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10191 {
   10192 
   10193 	if (sc->sc_tbi_linkup)
   10194 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10195 	else
   10196 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10197 
   10198 	/* 82540 or newer devices are active low */
   10199 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10200 
   10201 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10202 }
   10203 
   10204 /* GMII related */
   10205 
   10206 /*
   10207  * wm_gmii_reset:
   10208  *
   10209  *	Reset the PHY.
   10210  */
   10211 static void
   10212 wm_gmii_reset(struct wm_softc *sc)
   10213 {
   10214 	uint32_t reg;
   10215 	int rv;
   10216 
   10217 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10218 		device_xname(sc->sc_dev), __func__));
   10219 
   10220 	rv = sc->phy.acquire(sc);
   10221 	if (rv != 0) {
   10222 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10223 		    __func__);
   10224 		return;
   10225 	}
   10226 
   10227 	switch (sc->sc_type) {
   10228 	case WM_T_82542_2_0:
   10229 	case WM_T_82542_2_1:
   10230 		/* null */
   10231 		break;
   10232 	case WM_T_82543:
   10233 		/*
   10234 		 * With 82543, we need to force speed and duplex on the MAC
   10235 		 * equal to what the PHY speed and duplex configuration is.
   10236 		 * In addition, we need to perform a hardware reset on the PHY
   10237 		 * to take it out of reset.
   10238 		 */
   10239 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10240 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10241 
   10242 		/* The PHY reset pin is active-low. */
   10243 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10244 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10245 		    CTRL_EXT_SWDPIN(4));
   10246 		reg |= CTRL_EXT_SWDPIO(4);
   10247 
   10248 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10249 		CSR_WRITE_FLUSH(sc);
   10250 		delay(10*1000);
   10251 
   10252 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10253 		CSR_WRITE_FLUSH(sc);
   10254 		delay(150);
   10255 #if 0
   10256 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10257 #endif
   10258 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10259 		break;
   10260 	case WM_T_82544:	/* Reset 10000us */
   10261 	case WM_T_82540:
   10262 	case WM_T_82545:
   10263 	case WM_T_82545_3:
   10264 	case WM_T_82546:
   10265 	case WM_T_82546_3:
   10266 	case WM_T_82541:
   10267 	case WM_T_82541_2:
   10268 	case WM_T_82547:
   10269 	case WM_T_82547_2:
   10270 	case WM_T_82571:	/* Reset 100us */
   10271 	case WM_T_82572:
   10272 	case WM_T_82573:
   10273 	case WM_T_82574:
   10274 	case WM_T_82575:
   10275 	case WM_T_82576:
   10276 	case WM_T_82580:
   10277 	case WM_T_I350:
   10278 	case WM_T_I354:
   10279 	case WM_T_I210:
   10280 	case WM_T_I211:
   10281 	case WM_T_82583:
   10282 	case WM_T_80003:
   10283 		/* Generic reset */
   10284 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10285 		CSR_WRITE_FLUSH(sc);
   10286 		delay(20000);
   10287 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10288 		CSR_WRITE_FLUSH(sc);
   10289 		delay(20000);
   10290 
   10291 		if ((sc->sc_type == WM_T_82541)
   10292 		    || (sc->sc_type == WM_T_82541_2)
   10293 		    || (sc->sc_type == WM_T_82547)
   10294 		    || (sc->sc_type == WM_T_82547_2)) {
    10295 			/* Workarounds for IGP are done in igp_reset() */
   10296 			/* XXX add code to set LED after phy reset */
   10297 		}
   10298 		break;
   10299 	case WM_T_ICH8:
   10300 	case WM_T_ICH9:
   10301 	case WM_T_ICH10:
   10302 	case WM_T_PCH:
   10303 	case WM_T_PCH2:
   10304 	case WM_T_PCH_LPT:
   10305 	case WM_T_PCH_SPT:
   10306 	case WM_T_PCH_CNP:
   10307 		/* Generic reset */
   10308 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10309 		CSR_WRITE_FLUSH(sc);
   10310 		delay(100);
   10311 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10312 		CSR_WRITE_FLUSH(sc);
   10313 		delay(150);
   10314 		break;
   10315 	default:
   10316 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10317 		    __func__);
   10318 		break;
   10319 	}
   10320 
   10321 	sc->phy.release(sc);
   10322 
   10323 	/* get_cfg_done */
   10324 	wm_get_cfg_done(sc);
   10325 
   10326 	/* Extra setup */
   10327 	switch (sc->sc_type) {
   10328 	case WM_T_82542_2_0:
   10329 	case WM_T_82542_2_1:
   10330 	case WM_T_82543:
   10331 	case WM_T_82544:
   10332 	case WM_T_82540:
   10333 	case WM_T_82545:
   10334 	case WM_T_82545_3:
   10335 	case WM_T_82546:
   10336 	case WM_T_82546_3:
   10337 	case WM_T_82541_2:
   10338 	case WM_T_82547_2:
   10339 	case WM_T_82571:
   10340 	case WM_T_82572:
   10341 	case WM_T_82573:
   10342 	case WM_T_82574:
   10343 	case WM_T_82583:
   10344 	case WM_T_82575:
   10345 	case WM_T_82576:
   10346 	case WM_T_82580:
   10347 	case WM_T_I350:
   10348 	case WM_T_I354:
   10349 	case WM_T_I210:
   10350 	case WM_T_I211:
   10351 	case WM_T_80003:
   10352 		/* Null */
   10353 		break;
   10354 	case WM_T_82541:
   10355 	case WM_T_82547:
    10356 		/* XXX Configure the activity LED after PHY reset */
   10357 		break;
   10358 	case WM_T_ICH8:
   10359 	case WM_T_ICH9:
   10360 	case WM_T_ICH10:
   10361 	case WM_T_PCH:
   10362 	case WM_T_PCH2:
   10363 	case WM_T_PCH_LPT:
   10364 	case WM_T_PCH_SPT:
   10365 	case WM_T_PCH_CNP:
   10366 		wm_phy_post_reset(sc);
   10367 		break;
   10368 	default:
   10369 		panic("%s: unknown type\n", __func__);
   10370 		break;
   10371 	}
   10372 }
   10373 
    10374 /*
    10375  * Set up sc_phytype and mii_{read|write}reg.
    10376  *
    10377  *  To identify the PHY type, the correct read/write functions must be
    10378  * selected, and to select them, the PCI ID or the MAC type is needed
    10379  * without accessing any PHY registers.
    10380  *
    10381  *  On the first call of this function, the PHY ID is not known yet, so
    10382  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    10383  * perfect, so the result might be incorrect.
    10384  *
    10385  *  On the second call, the PHY OUI and model are used to identify the
    10386  * PHY type. This might still not be perfect because of missing table
    10387  * entries, but it should be better than the first call.
    10388  *
    10389  *  If the newly detected result differs from the previous assumption,
    10390  * a diagnostic message is printed.
    10391  */
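          /*
           * Illustrative call sequence (a sketch: the (0, 0) first call is
           * inferred from the dodiag check below; the second call is the
           * one made from wm_gmii_mediainit() after the PHY is probed):
           *
           *	wm_gmii_setup_phytype(sc, 0, 0);	-> guess from PCI ID/MAC type
           *	mii_attach(...);			-> probe the PHY
           *	wm_gmii_setup_phytype(sc, oui, model);	-> refine from the PHY ID
           */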
   10392 static void
   10393 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10394     uint16_t phy_model)
   10395 {
   10396 	device_t dev = sc->sc_dev;
   10397 	struct mii_data *mii = &sc->sc_mii;
   10398 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10399 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10400 	mii_readreg_t new_readreg;
   10401 	mii_writereg_t new_writereg;
   10402 	bool dodiag = true;
   10403 
   10404 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10405 		device_xname(sc->sc_dev), __func__));
   10406 
    10407 	/*
    10408 	 * A 1000BASE-T SFP uses SGMII and the first assumed PHY type is
    10409 	 * always incorrect, so don't print diag output on the 2nd call.
    10410 	 */
   10411 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10412 		dodiag = false;
   10413 
   10414 	if (mii->mii_readreg == NULL) {
   10415 		/*
   10416 		 *  This is the first call of this function. For ICH and PCH
   10417 		 * variants, it's difficult to determine the PHY access method
   10418 		 * by sc_type, so use the PCI product ID for some devices.
   10419 		 */
   10420 
   10421 		switch (sc->sc_pcidevid) {
   10422 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10423 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10424 			/* 82577 */
   10425 			new_phytype = WMPHY_82577;
   10426 			break;
   10427 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10428 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10429 			/* 82578 */
   10430 			new_phytype = WMPHY_82578;
   10431 			break;
   10432 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10433 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10434 			/* 82579 */
   10435 			new_phytype = WMPHY_82579;
   10436 			break;
   10437 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10438 		case PCI_PRODUCT_INTEL_82801I_BM:
   10439 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10440 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10441 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10442 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10443 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10444 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10445 			/* ICH8, 9, 10 with 82567 */
   10446 			new_phytype = WMPHY_BM;
   10447 			break;
   10448 		default:
   10449 			break;
   10450 		}
   10451 	} else {
   10452 		/* It's not the first call. Use PHY OUI and model */
   10453 		switch (phy_oui) {
   10454 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10455 			switch (phy_model) {
   10456 			case 0x0004: /* XXX */
   10457 				new_phytype = WMPHY_82578;
   10458 				break;
   10459 			default:
   10460 				break;
   10461 			}
   10462 			break;
   10463 		case MII_OUI_xxMARVELL:
   10464 			switch (phy_model) {
   10465 			case MII_MODEL_xxMARVELL_I210:
   10466 				new_phytype = WMPHY_I210;
   10467 				break;
   10468 			case MII_MODEL_xxMARVELL_E1011:
   10469 			case MII_MODEL_xxMARVELL_E1000_3:
   10470 			case MII_MODEL_xxMARVELL_E1000_5:
   10471 			case MII_MODEL_xxMARVELL_E1112:
   10472 				new_phytype = WMPHY_M88;
   10473 				break;
   10474 			case MII_MODEL_xxMARVELL_E1149:
   10475 				new_phytype = WMPHY_BM;
   10476 				break;
   10477 			case MII_MODEL_xxMARVELL_E1111:
   10478 			case MII_MODEL_xxMARVELL_I347:
   10479 			case MII_MODEL_xxMARVELL_E1512:
   10480 			case MII_MODEL_xxMARVELL_E1340M:
   10481 			case MII_MODEL_xxMARVELL_E1543:
   10482 				new_phytype = WMPHY_M88;
   10483 				break;
   10484 			case MII_MODEL_xxMARVELL_I82563:
   10485 				new_phytype = WMPHY_GG82563;
   10486 				break;
   10487 			default:
   10488 				break;
   10489 			}
   10490 			break;
   10491 		case MII_OUI_INTEL:
   10492 			switch (phy_model) {
   10493 			case MII_MODEL_INTEL_I82577:
   10494 				new_phytype = WMPHY_82577;
   10495 				break;
   10496 			case MII_MODEL_INTEL_I82579:
   10497 				new_phytype = WMPHY_82579;
   10498 				break;
   10499 			case MII_MODEL_INTEL_I217:
   10500 				new_phytype = WMPHY_I217;
   10501 				break;
   10502 			case MII_MODEL_INTEL_I82580:
   10503 				new_phytype = WMPHY_82580;
   10504 				break;
   10505 			case MII_MODEL_INTEL_I350:
   10506 				new_phytype = WMPHY_I350;
    10507 				break;
   10509 			default:
   10510 				break;
   10511 			}
   10512 			break;
   10513 		case MII_OUI_yyINTEL:
   10514 			switch (phy_model) {
   10515 			case MII_MODEL_yyINTEL_I82562G:
   10516 			case MII_MODEL_yyINTEL_I82562EM:
   10517 			case MII_MODEL_yyINTEL_I82562ET:
   10518 				new_phytype = WMPHY_IFE;
   10519 				break;
   10520 			case MII_MODEL_yyINTEL_IGP01E1000:
   10521 				new_phytype = WMPHY_IGP;
   10522 				break;
   10523 			case MII_MODEL_yyINTEL_I82566:
   10524 				new_phytype = WMPHY_IGP_3;
   10525 				break;
   10526 			default:
   10527 				break;
   10528 			}
   10529 			break;
   10530 		default:
   10531 			break;
   10532 		}
   10533 
   10534 		if (dodiag) {
   10535 			if (new_phytype == WMPHY_UNKNOWN)
   10536 				aprint_verbose_dev(dev,
   10537 				    "%s: Unknown PHY model. OUI=%06x, "
   10538 				    "model=%04x\n", __func__, phy_oui,
   10539 				    phy_model);
   10540 
   10541 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10542 			    && (sc->sc_phytype != new_phytype)) {
   10543 				aprint_error_dev(dev, "Previously assumed PHY "
    10544 				    "type(%u) was incorrect. PHY type from PHY "
   10545 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10546 			}
   10547 		}
   10548 	}
   10549 
   10550 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10551 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10552 		/* SGMII */
   10553 		new_readreg = wm_sgmii_readreg;
   10554 		new_writereg = wm_sgmii_writereg;
   10555 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10556 		/* BM2 (phyaddr == 1) */
   10557 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10558 		    && (new_phytype != WMPHY_BM)
   10559 		    && (new_phytype != WMPHY_UNKNOWN))
   10560 			doubt_phytype = new_phytype;
   10561 		new_phytype = WMPHY_BM;
   10562 		new_readreg = wm_gmii_bm_readreg;
   10563 		new_writereg = wm_gmii_bm_writereg;
   10564 	} else if (sc->sc_type >= WM_T_PCH) {
   10565 		/* All PCH* use _hv_ */
   10566 		new_readreg = wm_gmii_hv_readreg;
   10567 		new_writereg = wm_gmii_hv_writereg;
   10568 	} else if (sc->sc_type >= WM_T_ICH8) {
   10569 		/* non-82567 ICH8, 9 and 10 */
   10570 		new_readreg = wm_gmii_i82544_readreg;
   10571 		new_writereg = wm_gmii_i82544_writereg;
   10572 	} else if (sc->sc_type >= WM_T_80003) {
   10573 		/* 80003 */
   10574 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10575 		    && (new_phytype != WMPHY_GG82563)
   10576 		    && (new_phytype != WMPHY_UNKNOWN))
   10577 			doubt_phytype = new_phytype;
   10578 		new_phytype = WMPHY_GG82563;
   10579 		new_readreg = wm_gmii_i80003_readreg;
   10580 		new_writereg = wm_gmii_i80003_writereg;
   10581 	} else if (sc->sc_type >= WM_T_I210) {
   10582 		/* I210 and I211 */
   10583 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10584 		    && (new_phytype != WMPHY_I210)
   10585 		    && (new_phytype != WMPHY_UNKNOWN))
   10586 			doubt_phytype = new_phytype;
   10587 		new_phytype = WMPHY_I210;
   10588 		new_readreg = wm_gmii_gs40g_readreg;
   10589 		new_writereg = wm_gmii_gs40g_writereg;
   10590 	} else if (sc->sc_type >= WM_T_82580) {
   10591 		/* 82580, I350 and I354 */
   10592 		new_readreg = wm_gmii_82580_readreg;
   10593 		new_writereg = wm_gmii_82580_writereg;
   10594 	} else if (sc->sc_type >= WM_T_82544) {
    10595 		/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
   10596 		new_readreg = wm_gmii_i82544_readreg;
   10597 		new_writereg = wm_gmii_i82544_writereg;
   10598 	} else {
   10599 		new_readreg = wm_gmii_i82543_readreg;
   10600 		new_writereg = wm_gmii_i82543_writereg;
   10601 	}
   10602 
   10603 	if (new_phytype == WMPHY_BM) {
   10604 		/* All BM use _bm_ */
   10605 		new_readreg = wm_gmii_bm_readreg;
   10606 		new_writereg = wm_gmii_bm_writereg;
   10607 	}
   10608 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10609 		/* All PCH* use _hv_ */
   10610 		new_readreg = wm_gmii_hv_readreg;
   10611 		new_writereg = wm_gmii_hv_writereg;
   10612 	}
   10613 
   10614 	/* Diag output */
   10615 	if (dodiag) {
   10616 		if (doubt_phytype != WMPHY_UNKNOWN)
   10617 			aprint_error_dev(dev, "Assumed new PHY type was "
   10618 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10619 			    new_phytype);
   10620 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10621 		    && (sc->sc_phytype != new_phytype))
    10622 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   10623 			    "was incorrect. New PHY type = %u\n",
   10624 			    sc->sc_phytype, new_phytype);
   10625 
   10626 		if ((mii->mii_readreg != NULL) &&
   10627 		    (new_phytype == WMPHY_UNKNOWN))
   10628 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10629 
   10630 		if ((mii->mii_readreg != NULL) &&
   10631 		    (mii->mii_readreg != new_readreg))
   10632 			aprint_error_dev(dev, "Previously assumed PHY "
   10633 			    "read/write function was incorrect.\n");
   10634 	}
   10635 
   10636 	/* Update now */
   10637 	sc->sc_phytype = new_phytype;
   10638 	mii->mii_readreg = new_readreg;
   10639 	mii->mii_writereg = new_writereg;
   10640 	if (new_readreg == wm_gmii_hv_readreg) {
   10641 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10642 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10643 	} else if (new_readreg == wm_sgmii_readreg) {
   10644 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10645 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10646 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10647 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10648 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10649 	}
   10650 }
   10651 
   10652 /*
   10653  * wm_get_phy_id_82575:
   10654  *
    10655  * Return the PHY ID, or -1 on failure.
   10656  */
   10657 static int
   10658 wm_get_phy_id_82575(struct wm_softc *sc)
   10659 {
   10660 	uint32_t reg;
   10661 	int phyid = -1;
   10662 
   10663 	/* XXX */
   10664 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10665 		return -1;
   10666 
   10667 	if (wm_sgmii_uses_mdio(sc)) {
   10668 		switch (sc->sc_type) {
   10669 		case WM_T_82575:
   10670 		case WM_T_82576:
   10671 			reg = CSR_READ(sc, WMREG_MDIC);
   10672 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10673 			break;
   10674 		case WM_T_82580:
   10675 		case WM_T_I350:
   10676 		case WM_T_I354:
   10677 		case WM_T_I210:
   10678 		case WM_T_I211:
   10679 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10680 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10681 			break;
   10682 		default:
   10683 			return -1;
   10684 		}
   10685 	}
   10686 
   10687 	return phyid;
   10688 }
   10689 
   10690 /*
   10691  * wm_gmii_mediainit:
   10692  *
   10693  *	Initialize media for use on 1000BASE-T devices.
   10694  */
   10695 static void
   10696 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10697 {
   10698 	device_t dev = sc->sc_dev;
   10699 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10700 	struct mii_data *mii = &sc->sc_mii;
   10701 
   10702 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10703 		device_xname(sc->sc_dev), __func__));
   10704 
   10705 	/* We have GMII. */
   10706 	sc->sc_flags |= WM_F_HAS_MII;
   10707 
   10708 	if (sc->sc_type == WM_T_80003)
    10709 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10710 	else
   10711 		sc->sc_tipg = TIPG_1000T_DFLT;
   10712 
   10713 	/*
   10714 	 * Let the chip set speed/duplex on its own based on
   10715 	 * signals from the PHY.
   10716 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10717 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10718 	 */
   10719 	sc->sc_ctrl |= CTRL_SLU;
   10720 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10721 
   10722 	/* Initialize our media structures and probe the GMII. */
   10723 	mii->mii_ifp = ifp;
   10724 
   10725 	mii->mii_statchg = wm_gmii_statchg;
   10726 
    10727 	/* Switch PHY control from SMBus to PCIe */
   10728 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10729 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10730 	    || (sc->sc_type == WM_T_PCH_CNP))
   10731 		wm_init_phy_workarounds_pchlan(sc);
   10732 
   10733 	wm_gmii_reset(sc);
   10734 
   10735 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10736 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10737 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10738 
   10739 	/* Setup internal SGMII PHY for SFP */
   10740 	wm_sgmii_sfp_preconfig(sc);
   10741 
   10742 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10743 	    || (sc->sc_type == WM_T_82580)
   10744 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10745 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10746 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10747 			/* Attach only one port */
   10748 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10749 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10750 		} else {
   10751 			int i, id;
   10752 			uint32_t ctrl_ext;
   10753 
   10754 			id = wm_get_phy_id_82575(sc);
   10755 			if (id != -1) {
   10756 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10757 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10758 			}
   10759 			if ((id == -1)
   10760 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10761 				/* Power on sgmii phy if it is disabled */
   10762 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10763 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10764 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10765 				CSR_WRITE_FLUSH(sc);
   10766 				delay(300*1000); /* XXX too long */
   10767 
    10768 				/*
    10769 				 * Scan PHY addresses 1 through 7.
    10770 				 *
    10771 				 * I2C access can fail with the I2C
    10772 				 * register's ERROR bit set, so suppress
    10773 				 * error messages while scanning.
    10774 				 */
   10775 				sc->phy.no_errprint = true;
   10776 				for (i = 1; i < 8; i++)
   10777 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10778 					    0xffffffff, i, MII_OFFSET_ANY,
   10779 					    MIIF_DOPAUSE);
   10780 				sc->phy.no_errprint = false;
   10781 
   10782 				/* Restore previous sfp cage power state */
   10783 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10784 			}
   10785 		}
   10786 	} else
   10787 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10788 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10789 
    10790 	/*
    10791 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
    10792 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
    10793 	 */
   10794 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10795 		|| (sc->sc_type == WM_T_PCH_SPT)
   10796 		|| (sc->sc_type == WM_T_PCH_CNP))
   10797 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10798 		wm_set_mdio_slow_mode_hv(sc);
   10799 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10800 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10801 	}
   10802 
   10803 	/*
   10804 	 * (For ICH8 variants)
    10805 	 * If PHY detection failed, use the BM r/w functions and retry.
   10806 	 */
   10807 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10808 		/* if failed, retry with *_bm_* */
   10809 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10810 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10811 		    sc->sc_phytype);
   10812 		sc->sc_phytype = WMPHY_BM;
   10813 		mii->mii_readreg = wm_gmii_bm_readreg;
   10814 		mii->mii_writereg = wm_gmii_bm_writereg;
   10815 
   10816 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10817 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10818 	}
   10819 
   10820 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10821 		/* No PHY was found */
   10822 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10823 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10824 		sc->sc_phytype = WMPHY_NONE;
   10825 	} else {
   10826 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10827 
   10828 		/*
    10829 		 * A PHY was found! Check the PHY type again with the second
    10830 		 * call of wm_gmii_setup_phytype().
   10831 		 */
   10832 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10833 		    child->mii_mpd_model);
   10834 
   10835 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10836 	}
   10837 }
   10838 
   10839 /*
   10840  * wm_gmii_mediachange:	[ifmedia interface function]
   10841  *
   10842  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10843  */
   10844 static int
   10845 wm_gmii_mediachange(struct ifnet *ifp)
   10846 {
   10847 	struct wm_softc *sc = ifp->if_softc;
   10848 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10849 	uint32_t reg;
   10850 	int rc;
   10851 
   10852 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10853 		device_xname(sc->sc_dev), __func__));
   10854 	if ((ifp->if_flags & IFF_UP) == 0)
   10855 		return 0;
   10856 
   10857 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10858 	if ((sc->sc_type == WM_T_82580)
   10859 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10860 	    || (sc->sc_type == WM_T_I211)) {
   10861 		reg = CSR_READ(sc, WMREG_PHPM);
   10862 		reg &= ~PHPM_GO_LINK_D;
   10863 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10864 	}
   10865 
   10866 	/* Disable D0 LPLU. */
   10867 	wm_lplu_d0_disable(sc);
   10868 
   10869 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10870 	sc->sc_ctrl |= CTRL_SLU;
   10871 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10872 	    || (sc->sc_type > WM_T_82543)) {
   10873 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10874 	} else {
   10875 		sc->sc_ctrl &= ~CTRL_ASDE;
   10876 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10877 		if (ife->ifm_media & IFM_FDX)
   10878 			sc->sc_ctrl |= CTRL_FD;
   10879 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10880 		case IFM_10_T:
   10881 			sc->sc_ctrl |= CTRL_SPEED_10;
   10882 			break;
   10883 		case IFM_100_TX:
   10884 			sc->sc_ctrl |= CTRL_SPEED_100;
   10885 			break;
   10886 		case IFM_1000_T:
   10887 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10888 			break;
   10889 		case IFM_NONE:
   10890 			/* There is no specific setting for IFM_NONE */
   10891 			break;
   10892 		default:
   10893 			panic("wm_gmii_mediachange: bad media 0x%x",
   10894 			    ife->ifm_media);
   10895 		}
   10896 	}
   10897 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10898 	CSR_WRITE_FLUSH(sc);
   10899 
   10900 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10901 		wm_serdes_mediachange(ifp);
   10902 
   10903 	if (sc->sc_type <= WM_T_82543)
   10904 		wm_gmii_reset(sc);
   10905 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10906 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10907 		/* Allow time for the SFP cage to power up the PHY */
   10908 		delay(300 * 1000);
   10909 		wm_gmii_reset(sc);
   10910 	}
   10911 
   10912 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10913 		return 0;
   10914 	return rc;
   10915 }
   10916 
   10917 /*
   10918  * wm_gmii_mediastatus:	[ifmedia interface function]
   10919  *
   10920  *	Get the current interface media status on a 1000BASE-T device.
   10921  */
   10922 static void
   10923 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10924 {
   10925 	struct wm_softc *sc = ifp->if_softc;
   10926 
   10927 	ether_mediastatus(ifp, ifmr);
   10928 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10929 	    | sc->sc_flowflags;
   10930 }
   10931 
   10932 #define	MDI_IO		CTRL_SWDPIN(2)
   10933 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10934 #define	MDI_CLK		CTRL_SWDPIN(3)
   10935 
   10936 static void
   10937 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10938 {
   10939 	uint32_t i, v;
   10940 
   10941 	v = CSR_READ(sc, WMREG_CTRL);
   10942 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10943 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10944 
   10945 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10946 		if (data & i)
   10947 			v |= MDI_IO;
   10948 		else
   10949 			v &= ~MDI_IO;
   10950 		CSR_WRITE(sc, WMREG_CTRL, v);
   10951 		CSR_WRITE_FLUSH(sc);
   10952 		delay(10);
   10953 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10954 		CSR_WRITE_FLUSH(sc);
   10955 		delay(10);
   10956 		CSR_WRITE(sc, WMREG_CTRL, v);
   10957 		CSR_WRITE_FLUSH(sc);
   10958 		delay(10);
   10959 	}
   10960 }
   10961 
   10962 static uint16_t
   10963 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10964 {
   10965 	uint32_t v, i;
   10966 	uint16_t data = 0;
   10967 
   10968 	v = CSR_READ(sc, WMREG_CTRL);
   10969 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10970 	v |= CTRL_SWDPIO(3);
   10971 
   10972 	CSR_WRITE(sc, WMREG_CTRL, v);
   10973 	CSR_WRITE_FLUSH(sc);
   10974 	delay(10);
   10975 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10976 	CSR_WRITE_FLUSH(sc);
   10977 	delay(10);
   10978 	CSR_WRITE(sc, WMREG_CTRL, v);
   10979 	CSR_WRITE_FLUSH(sc);
   10980 	delay(10);
   10981 
   10982 	for (i = 0; i < 16; i++) {
   10983 		data <<= 1;
   10984 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10985 		CSR_WRITE_FLUSH(sc);
   10986 		delay(10);
   10987 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10988 			data |= 1;
   10989 		CSR_WRITE(sc, WMREG_CTRL, v);
   10990 		CSR_WRITE_FLUSH(sc);
   10991 		delay(10);
   10992 	}
   10993 
   10994 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10995 	CSR_WRITE_FLUSH(sc);
   10996 	delay(10);
   10997 	CSR_WRITE(sc, WMREG_CTRL, v);
   10998 	CSR_WRITE_FLUSH(sc);
   10999 	delay(10);
   11000 
   11001 	return data;
   11002 }
   11003 
   11004 #undef MDI_IO
   11005 #undef MDI_DIR
   11006 #undef MDI_CLK
   11007 
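          /*
           * A sketch of the bit-banged management frame built from the
           * helpers above (IEEE 802.3 clause 22 framing): a preamble of
           * 32 one bits, then ST (01), OP (10 = read, 01 = write), a
           * 5-bit PHY address and a 5-bit register address. On a write,
           * the host then sends the turnaround bits and 16 data bits; on
           * a read, the PHY drives 16 data bits back, which
           * wm_i82543_mii_recvbits() samples on MDI_IO.
           */
          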
   11008 /*
   11009  * wm_gmii_i82543_readreg:	[mii interface function]
   11010  *
   11011  *	Read a PHY register on the GMII (i82543 version).
   11012  */
   11013 static int
   11014 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11015 {
   11016 	struct wm_softc *sc = device_private(dev);
   11017 
   11018 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11019 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11020 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11021 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11022 
   11023 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11024 		device_xname(dev), phy, reg, *val));
   11025 
   11026 	return 0;
   11027 }
   11028 
   11029 /*
   11030  * wm_gmii_i82543_writereg:	[mii interface function]
   11031  *
   11032  *	Write a PHY register on the GMII (i82543 version).
   11033  */
   11034 static int
   11035 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11036 {
   11037 	struct wm_softc *sc = device_private(dev);
   11038 
   11039 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11040 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11041 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11042 	    (MII_COMMAND_START << 30), 32);
   11043 
   11044 	return 0;
   11045 }
   11046 
   11047 /*
   11048  * wm_gmii_mdic_readreg:	[mii interface function]
   11049  *
   11050  *	Read a PHY register on the GMII.
   11051  */
   11052 static int
   11053 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11054 {
   11055 	struct wm_softc *sc = device_private(dev);
   11056 	uint32_t mdic = 0;
   11057 	int i;
   11058 
   11059 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11060 	    && (reg > MII_ADDRMASK)) {
   11061 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11062 		    __func__, sc->sc_phytype, reg);
   11063 		reg &= MII_ADDRMASK;
   11064 	}
   11065 
   11066 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11067 	    MDIC_REGADD(reg));
   11068 
   11069 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11070 		delay(50);
   11071 		mdic = CSR_READ(sc, WMREG_MDIC);
   11072 		if (mdic & MDIC_READY)
   11073 			break;
   11074 	}
   11075 
   11076 	if ((mdic & MDIC_READY) == 0) {
   11077 		DPRINTF(sc, WM_DEBUG_GMII,
   11078 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11079 			device_xname(dev), phy, reg));
   11080 		return ETIMEDOUT;
   11081 	} else if (mdic & MDIC_E) {
   11082 		/* This is normal if no PHY is present. */
		DPRINTF(sc, WM_DEBUG_GMII,
		    ("%s: MDIC read error: phy %d reg %d\n",
			device_xname(dev), phy, reg));
   11085 		return -1;
   11086 	} else
   11087 		*val = MDIC_DATA(mdic);
   11088 
   11089 	/*
   11090 	 * Allow some time after each MDIC transaction to avoid
   11091 	 * reading duplicate data in the next MDIC transaction.
   11092 	 */
   11093 	if (sc->sc_type == WM_T_PCH2)
   11094 		delay(100);
   11095 
   11096 	return 0;
   11097 }
   11098 
   11099 /*
   11100  * wm_gmii_mdic_writereg:	[mii interface function]
   11101  *
   11102  *	Write a PHY register on the GMII.
   11103  */
   11104 static int
   11105 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11106 {
   11107 	struct wm_softc *sc = device_private(dev);
   11108 	uint32_t mdic = 0;
   11109 	int i;
   11110 
   11111 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11112 	    && (reg > MII_ADDRMASK)) {
   11113 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11114 		    __func__, sc->sc_phytype, reg);
   11115 		reg &= MII_ADDRMASK;
   11116 	}
   11117 
   11118 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11119 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11120 
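	/* Poll the ready bit. */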
   11121 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11122 		delay(50);
   11123 		mdic = CSR_READ(sc, WMREG_MDIC);
   11124 		if (mdic & MDIC_READY)
   11125 			break;
   11126 	}
   11127 
   11128 	if ((mdic & MDIC_READY) == 0) {
   11129 		DPRINTF(sc, WM_DEBUG_GMII,
   11130 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11131 			device_xname(dev), phy, reg));
   11132 		return ETIMEDOUT;
   11133 	} else if (mdic & MDIC_E) {
   11134 		DPRINTF(sc, WM_DEBUG_GMII,
   11135 		    ("%s: MDIC write error: phy %d reg %d\n",
   11136 			device_xname(dev), phy, reg));
   11137 		return -1;
   11138 	}
   11139 
   11140 	/*
   11141 	 * Allow some time after each MDIC transaction to avoid
   11142 	 * reading duplicate data in the next MDIC transaction.
   11143 	 */
   11144 	if (sc->sc_type == WM_T_PCH2)
   11145 		delay(100);
   11146 
   11147 	return 0;
   11148 }
   11149 
   11150 /*
   11151  * wm_gmii_i82544_readreg:	[mii interface function]
   11152  *
   11153  *	Read a PHY register on the GMII.
   11154  */
   11155 static int
   11156 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11157 {
   11158 	struct wm_softc *sc = device_private(dev);
   11159 	int rv;
   11160 
   11161 	if (sc->phy.acquire(sc)) {
   11162 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11163 		return -1;
   11164 	}
   11165 
   11166 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11167 
   11168 	sc->phy.release(sc);
   11169 
   11170 	return rv;
   11171 }
   11172 
   11173 static int
   11174 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11175 {
   11176 	struct wm_softc *sc = device_private(dev);
   11177 	int rv;
   11178 
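	/*
	 * Registers above the multi-page boundary need an explicit page
	 * select first; IGP PHYs encode the page in the upper bits of
	 * "reg", which is written to IGPHY_PAGE_SELECT as-is.
	 */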
   11179 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11180 		switch (sc->sc_phytype) {
   11181 		case WMPHY_IGP:
   11182 		case WMPHY_IGP_2:
   11183 		case WMPHY_IGP_3:
   11184 			rv = wm_gmii_mdic_writereg(dev, phy,
   11185 			    IGPHY_PAGE_SELECT, reg);
   11186 			if (rv != 0)
   11187 				return rv;
   11188 			break;
   11189 		default:
   11190 #ifdef WM_DEBUG
   11191 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11192 			    __func__, sc->sc_phytype, reg);
   11193 #endif
   11194 			break;
   11195 		}
   11196 	}
   11197 
   11198 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11199 }
   11200 
   11201 /*
   11202  * wm_gmii_i82544_writereg:	[mii interface function]
   11203  *
   11204  *	Write a PHY register on the GMII.
   11205  */
   11206 static int
   11207 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11208 {
   11209 	struct wm_softc *sc = device_private(dev);
   11210 	int rv;
   11211 
   11212 	if (sc->phy.acquire(sc)) {
   11213 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11214 		return -1;
   11215 	}
   11216 
   11217 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11218 	sc->phy.release(sc);
   11219 
   11220 	return rv;
   11221 }
   11222 
   11223 static int
   11224 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11225 {
   11226 	struct wm_softc *sc = device_private(dev);
   11227 	int rv;
   11228 
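	/* Same page select dance as in wm_gmii_i82544_readreg_locked(). */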
   11229 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11230 		switch (sc->sc_phytype) {
   11231 		case WMPHY_IGP:
   11232 		case WMPHY_IGP_2:
   11233 		case WMPHY_IGP_3:
   11234 			rv = wm_gmii_mdic_writereg(dev, phy,
   11235 			    IGPHY_PAGE_SELECT, reg);
   11236 			if (rv != 0)
   11237 				return rv;
   11238 			break;
   11239 		default:
   11240 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11242 			    __func__, sc->sc_phytype, reg);
   11243 #endif
   11244 			break;
   11245 		}
   11246 	}
   11247 
   11248 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11249 }
   11250 
   11251 /*
   11252  * wm_gmii_i80003_readreg:	[mii interface function]
   11253  *
 *	Read a PHY register on the kumeran.
   11255  * This could be handled by the PHY layer if we didn't have to lock the
   11256  * resource ...
   11257  */
   11258 static int
   11259 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11260 {
   11261 	struct wm_softc *sc = device_private(dev);
   11262 	int page_select;
   11263 	uint16_t temp, temp2;
   11264 	int rv = 0;
   11265 
   11266 	if (phy != 1) /* Only one PHY on kumeran bus */
   11267 		return -1;
   11268 
   11269 	if (sc->phy.acquire(sc)) {
   11270 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11271 		return -1;
   11272 	}
   11273 
   11274 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11275 		page_select = GG82563_PHY_PAGE_SELECT;
   11276 	else {
   11277 		/*
   11278 		 * Use Alternative Page Select register to access registers
   11279 		 * 30 and 31.
   11280 		 */
   11281 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11282 	}
   11283 	temp = reg >> GG82563_PAGE_SHIFT;
   11284 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11285 		goto out;
   11286 
   11287 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11288 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11291 		 */
   11292 		delay(200);
   11293 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11294 		if ((rv != 0) || (temp2 != temp)) {
   11295 			device_printf(dev, "%s failed\n", __func__);
   11296 			rv = -1;
   11297 			goto out;
   11298 		}
   11299 		delay(200);
   11300 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11301 		delay(200);
   11302 	} else
   11303 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11304 
   11305 out:
   11306 	sc->phy.release(sc);
   11307 	return rv;
   11308 }
   11309 
   11310 /*
   11311  * wm_gmii_i80003_writereg:	[mii interface function]
   11312  *
   11313  *	Write a PHY register on the kumeran.
   11314  * This could be handled by the PHY layer if we didn't have to lock the
   11315  * resource ...
   11316  */
   11317 static int
   11318 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11319 {
   11320 	struct wm_softc *sc = device_private(dev);
   11321 	int page_select, rv;
   11322 	uint16_t temp, temp2;
   11323 
   11324 	if (phy != 1) /* Only one PHY on kumeran bus */
   11325 		return -1;
   11326 
   11327 	if (sc->phy.acquire(sc)) {
   11328 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11329 		return -1;
   11330 	}
   11331 
   11332 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11333 		page_select = GG82563_PHY_PAGE_SELECT;
   11334 	else {
   11335 		/*
   11336 		 * Use Alternative Page Select register to access registers
   11337 		 * 30 and 31.
   11338 		 */
   11339 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11340 	}
   11341 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11342 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11343 		goto out;
   11344 
   11345 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11346 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11349 		 */
   11350 		delay(200);
   11351 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11352 		if ((rv != 0) || (temp2 != temp)) {
   11353 			device_printf(dev, "%s failed\n", __func__);
   11354 			rv = -1;
   11355 			goto out;
   11356 		}
   11357 		delay(200);
   11358 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11359 		delay(200);
   11360 	} else
   11361 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11362 
   11363 out:
   11364 	sc->phy.release(sc);
   11365 	return rv;
   11366 }
   11367 
   11368 /*
   11369  * wm_gmii_bm_readreg:	[mii interface function]
   11370  *
 *	Read a PHY register on the BM PHY.
   11372  * This could be handled by the PHY layer if we didn't have to lock the
   11373  * resource ...
   11374  */
   11375 static int
   11376 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11377 {
   11378 	struct wm_softc *sc = device_private(dev);
   11379 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11380 	int rv;
   11381 
   11382 	if (sc->phy.acquire(sc)) {
   11383 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11384 		return -1;
   11385 	}
   11386 
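	/*
	 * Page select, port control and wakeup registers (pages >= 768,
	 * register 25 on page 0 and register 31) only respond at PHY
	 * address 1 on these parts.
	 */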
   11387 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11388 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11389 		    || (reg == 31)) ? 1 : phy;
   11390 	/* Page 800 works differently than the rest so it has its own func */
   11391 	if (page == BM_WUC_PAGE) {
   11392 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11393 		goto release;
   11394 	}
   11395 
   11396 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11397 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11398 		    && (sc->sc_type != WM_T_82583))
   11399 			rv = wm_gmii_mdic_writereg(dev, phy,
   11400 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11401 		else
   11402 			rv = wm_gmii_mdic_writereg(dev, phy,
   11403 			    BME1000_PHY_PAGE_SELECT, page);
   11404 		if (rv != 0)
   11405 			goto release;
   11406 	}
   11407 
   11408 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11409 
   11410 release:
   11411 	sc->phy.release(sc);
   11412 	return rv;
   11413 }
   11414 
   11415 /*
   11416  * wm_gmii_bm_writereg:	[mii interface function]
   11417  *
 *	Write a PHY register on the BM PHY.
   11419  * This could be handled by the PHY layer if we didn't have to lock the
   11420  * resource ...
   11421  */
   11422 static int
   11423 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11424 {
   11425 	struct wm_softc *sc = device_private(dev);
   11426 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11427 	int rv;
   11428 
   11429 	if (sc->phy.acquire(sc)) {
   11430 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11431 		return -1;
   11432 	}
   11433 
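	/*
	 * As in wm_gmii_bm_readreg(), some registers only respond at
	 * PHY address 1.
	 */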
   11434 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11435 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11436 		    || (reg == 31)) ? 1 : phy;
   11437 	/* Page 800 works differently than the rest so it has its own func */
   11438 	if (page == BM_WUC_PAGE) {
   11439 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11440 		goto release;
   11441 	}
   11442 
   11443 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11444 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11445 		    && (sc->sc_type != WM_T_82583))
   11446 			rv = wm_gmii_mdic_writereg(dev, phy,
   11447 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11448 		else
   11449 			rv = wm_gmii_mdic_writereg(dev, phy,
   11450 			    BME1000_PHY_PAGE_SELECT, page);
   11451 		if (rv != 0)
   11452 			goto release;
   11453 	}
   11454 
   11455 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11456 
   11457 release:
   11458 	sc->phy.release(sc);
   11459 	return rv;
   11460 }
   11461 
   11462 /*
   11463  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
 *  @dev: device handle
 *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   11466  *
   11467  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11468  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11469  */
   11470 static int
   11471 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11472 {
   11473 #ifdef WM_DEBUG
   11474 	struct wm_softc *sc = device_private(dev);
   11475 #endif
   11476 	uint16_t temp;
   11477 	int rv;
   11478 
   11479 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11480 		device_xname(dev), __func__));
   11481 
   11482 	if (!phy_regp)
   11483 		return -1;
   11484 
   11485 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11486 
   11487 	/* Select Port Control Registers page */
   11488 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11489 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11490 	if (rv != 0)
   11491 		return rv;
   11492 
   11493 	/* Read WUCE and save it */
   11494 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11495 	if (rv != 0)
   11496 		return rv;
   11497 
	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
   11501 	temp = *phy_regp;
   11502 	temp |= BM_WUC_ENABLE_BIT;
   11503 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11504 
   11505 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11506 		return rv;
   11507 
	/*
	 * Select the Host Wakeup Registers page; the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
   11511 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11512 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11513 }
   11514 
   11515 /*
   11516  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
 *  @dev: device handle
 *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   11519  *
   11520  *  Restore BM_WUC_ENABLE_REG to its original value.
   11521  *
   11522  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11523  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11524  *  caller.
   11525  */
   11526 static int
   11527 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11528 {
   11529 #ifdef WM_DEBUG
   11530 	struct wm_softc *sc = device_private(dev);
   11531 #endif
   11532 
   11533 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11534 		device_xname(dev), __func__));
   11535 
   11536 	if (!phy_regp)
   11537 		return -1;
   11538 
   11539 	/* Select Port Control Registers page */
   11540 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11541 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11542 
   11543 	/* Restore 769.17 to its original value */
   11544 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11545 
   11546 	return 0;
   11547 }
   11548 
   11549 /*
   11550  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: device handle
   11552  *  @offset: register offset to be read or written
   11553  *  @val: pointer to the data to read or write
   11554  *  @rd: determines if operation is read or write
   11555  *  @page_set: BM_WUC_PAGE already set and access enabled
   11556  *
   11557  *  Read the PHY register at offset and store the retrieved information in
   11558  *  data, or write data to PHY register at offset.  Note the procedure to
   11559  *  access the PHY wakeup registers is different than reading the other PHY
   11560  *  registers. It works as such:
   11561  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability firmware)
   11563  *  3) Write the address using the address opcode (0x11)
   11564  *  4) Read or write the data using the data opcode (0x12)
   11565  *  5) Restore 769.17.2 to its original value
   11566  *
   11567  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11568  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11569  *
   11570  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11571  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11573  */
   11574 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11576 	bool page_set)
   11577 {
   11578 	struct wm_softc *sc = device_private(dev);
   11579 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11580 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11581 	uint16_t wuce;
   11582 	int rv = 0;
   11583 
   11584 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11585 		device_xname(dev), __func__));
   11586 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11587 	if ((sc->sc_type == WM_T_PCH)
   11588 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11589 		device_printf(dev,
   11590 		    "Attempting to access page %d while gig enabled.\n", page);
   11591 	}
   11592 
   11593 	if (!page_set) {
   11594 		/* Enable access to PHY wakeup registers */
   11595 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11596 		if (rv != 0) {
   11597 			device_printf(dev,
   11598 			    "%s: Could not enable PHY wakeup reg access\n",
   11599 			    __func__);
   11600 			return rv;
   11601 		}
   11602 	}
   11603 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11604 		device_xname(sc->sc_dev), __func__, page, regnum));
   11605 
	/*
	 * 3) and 4) Access the PHY wakeup register: write the offset,
	 * then read or write the data (see the comment above this
	 * function for the full procedure).
	 */
   11610 
   11611 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11612 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11613 	if (rv != 0)
   11614 		return rv;
   11615 
   11616 	if (rd) {
   11617 		/* Read the Wakeup register page value using opcode 0x12 */
   11618 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11619 	} else {
   11620 		/* Write the Wakeup register page value using opcode 0x12 */
   11621 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11622 	}
   11623 	if (rv != 0)
   11624 		return rv;
   11625 
   11626 	if (!page_set)
   11627 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11628 
   11629 	return rv;
   11630 }
   11631 
   11632 /*
   11633  * wm_gmii_hv_readreg:	[mii interface function]
   11634  *
 *	Read a PHY register on the HV (PCH) PHY.
   11636  * This could be handled by the PHY layer if we didn't have to lock the
   11637  * resource ...
   11638  */
   11639 static int
   11640 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11641 {
   11642 	struct wm_softc *sc = device_private(dev);
   11643 	int rv;
   11644 
   11645 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11646 		device_xname(dev), __func__));
   11647 	if (sc->phy.acquire(sc)) {
   11648 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11649 		return -1;
   11650 	}
   11651 
   11652 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11653 	sc->phy.release(sc);
   11654 	return rv;
   11655 }
   11656 
   11657 static int
   11658 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11659 {
   11660 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11661 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11662 	int rv;
   11663 
   11664 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11665 
   11666 	/* Page 800 works differently than the rest so it has its own func */
   11667 	if (page == BM_WUC_PAGE)
   11668 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11669 
	/*
	 * Pages lower than 768 work differently from the rest and are
	 * not handled here.
	 */
   11674 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11675 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11676 		return -1;
   11677 	}
   11678 
   11679 	/*
   11680 	 * XXX I21[789] documents say that the SMBus Address register is at
   11681 	 * PHY address 01, Page 0 (not 768), Register 26.
   11682 	 */
   11683 	if (page == HV_INTC_FC_PAGE_START)
   11684 		page = 0;
   11685 
   11686 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11687 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11688 		    page << BME1000_PAGE_SHIFT);
   11689 		if (rv != 0)
   11690 			return rv;
   11691 	}
   11692 
   11693 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11694 }
   11695 
   11696 /*
   11697  * wm_gmii_hv_writereg:	[mii interface function]
   11698  *
 *	Write a PHY register on the HV (PCH) PHY.
   11700  * This could be handled by the PHY layer if we didn't have to lock the
   11701  * resource ...
   11702  */
   11703 static int
   11704 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11705 {
   11706 	struct wm_softc *sc = device_private(dev);
   11707 	int rv;
   11708 
   11709 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11710 		device_xname(dev), __func__));
   11711 
   11712 	if (sc->phy.acquire(sc)) {
   11713 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11714 		return -1;
   11715 	}
   11716 
   11717 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11718 	sc->phy.release(sc);
   11719 
   11720 	return rv;
   11721 }
   11722 
   11723 static int
   11724 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11725 {
   11726 	struct wm_softc *sc = device_private(dev);
   11727 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11728 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11729 	int rv;
   11730 
   11731 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11732 
   11733 	/* Page 800 works differently than the rest so it has its own func */
   11734 	if (page == BM_WUC_PAGE)
   11735 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11736 		    false);
   11737 
	/*
	 * Pages lower than 768 work differently from the rest and are
	 * not handled here.
	 */
   11742 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11743 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11744 		return -1;
   11745 	}
   11746 
	/*
	 * XXX I21[789] documents say that the SMBus Address register is
	 * at PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Work around MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control register
	 * is set).
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0)) {
			device_printf(dev, "XXX need workaround\n");
		}
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   11778 
   11779 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11780 }
   11781 
   11782 /*
   11783  * wm_gmii_82580_readreg:	[mii interface function]
   11784  *
   11785  *	Read a PHY register on the 82580 and I350.
   11786  * This could be handled by the PHY layer if we didn't have to lock the
   11787  * resource ...
   11788  */
   11789 static int
   11790 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11791 {
   11792 	struct wm_softc *sc = device_private(dev);
   11793 	int rv;
   11794 
   11795 	if (sc->phy.acquire(sc) != 0) {
   11796 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11797 		return -1;
   11798 	}
   11799 
   11800 #ifdef DIAGNOSTIC
   11801 	if (reg > MII_ADDRMASK) {
   11802 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11803 		    __func__, sc->sc_phytype, reg);
   11804 		reg &= MII_ADDRMASK;
   11805 	}
   11806 #endif
   11807 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11808 
   11809 	sc->phy.release(sc);
   11810 	return rv;
   11811 }
   11812 
   11813 /*
   11814  * wm_gmii_82580_writereg:	[mii interface function]
   11815  *
   11816  *	Write a PHY register on the 82580 and I350.
   11817  * This could be handled by the PHY layer if we didn't have to lock the
   11818  * resource ...
   11819  */
   11820 static int
   11821 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11822 {
   11823 	struct wm_softc *sc = device_private(dev);
   11824 	int rv;
   11825 
   11826 	if (sc->phy.acquire(sc) != 0) {
   11827 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11828 		return -1;
   11829 	}
   11830 
   11831 #ifdef DIAGNOSTIC
   11832 	if (reg > MII_ADDRMASK) {
   11833 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11834 		    __func__, sc->sc_phytype, reg);
   11835 		reg &= MII_ADDRMASK;
   11836 	}
   11837 #endif
   11838 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11839 
   11840 	sc->phy.release(sc);
   11841 	return rv;
   11842 }
   11843 
   11844 /*
   11845  * wm_gmii_gs40g_readreg:	[mii interface function]
   11846  *
 *	Read a PHY register on the I210 and I211.
   11848  * This could be handled by the PHY layer if we didn't have to lock the
   11849  * resource ...
   11850  */
   11851 static int
   11852 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11853 {
   11854 	struct wm_softc *sc = device_private(dev);
   11855 	int page, offset;
   11856 	int rv;
   11857 
   11858 	/* Acquire semaphore */
   11859 	if (sc->phy.acquire(sc)) {
   11860 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11861 		return -1;
   11862 	}
   11863 
   11864 	/* Page select */
   11865 	page = reg >> GS40G_PAGE_SHIFT;
   11866 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11867 	if (rv != 0)
   11868 		goto release;
   11869 
   11870 	/* Read reg */
   11871 	offset = reg & GS40G_OFFSET_MASK;
   11872 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11873 
   11874 release:
   11875 	sc->phy.release(sc);
   11876 	return rv;
   11877 }
   11878 
   11879 /*
   11880  * wm_gmii_gs40g_writereg:	[mii interface function]
   11881  *
   11882  *	Write a PHY register on the I210 and I211.
   11883  * This could be handled by the PHY layer if we didn't have to lock the
   11884  * resource ...
   11885  */
   11886 static int
   11887 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11888 {
   11889 	struct wm_softc *sc = device_private(dev);
   11890 	uint16_t page;
   11891 	int offset, rv;
   11892 
   11893 	/* Acquire semaphore */
   11894 	if (sc->phy.acquire(sc)) {
   11895 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11896 		return -1;
   11897 	}
   11898 
   11899 	/* Page select */
   11900 	page = reg >> GS40G_PAGE_SHIFT;
   11901 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11902 	if (rv != 0)
   11903 		goto release;
   11904 
   11905 	/* Write reg */
   11906 	offset = reg & GS40G_OFFSET_MASK;
   11907 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11908 
   11909 release:
   11910 	/* Release semaphore */
   11911 	sc->phy.release(sc);
   11912 	return rv;
   11913 }
   11914 
   11915 /*
   11916  * wm_gmii_statchg:	[mii interface function]
   11917  *
   11918  *	Callback from MII layer when media changes.
   11919  */
   11920 static void
   11921 wm_gmii_statchg(struct ifnet *ifp)
   11922 {
   11923 	struct wm_softc *sc = ifp->if_softc;
   11924 	struct mii_data *mii = &sc->sc_mii;
   11925 
   11926 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11927 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11928 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11929 
   11930 	/* Get flow control negotiation result. */
   11931 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11932 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11933 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11934 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11935 	}
   11936 
   11937 	if (sc->sc_flowflags & IFM_FLOW) {
   11938 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11939 			sc->sc_ctrl |= CTRL_TFCE;
   11940 			sc->sc_fcrtl |= FCRTL_XONE;
   11941 		}
   11942 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11943 			sc->sc_ctrl |= CTRL_RFCE;
   11944 	}
   11945 
   11946 	if (mii->mii_media_active & IFM_FDX) {
   11947 		DPRINTF(sc, WM_DEBUG_LINK,
   11948 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11949 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11950 	} else {
   11951 		DPRINTF(sc, WM_DEBUG_LINK,
   11952 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11953 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11954 	}
   11955 
   11956 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11957 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11958 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11959 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11960 	if (sc->sc_type == WM_T_80003) {
   11961 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11962 		case IFM_1000_T:
   11963 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11964 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11966 			break;
   11967 		default:
   11968 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11969 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11971 			break;
   11972 		}
   11973 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11974 	}
   11975 }
   11976 
   11977 /* kumeran related (80003, ICH* and PCH*) */
   11978 
   11979 /*
   11980  * wm_kmrn_readreg:
   11981  *
   11982  *	Read a kumeran register
   11983  */
   11984 static int
   11985 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11986 {
   11987 	int rv;
   11988 
   11989 	if (sc->sc_type == WM_T_80003)
   11990 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11991 	else
   11992 		rv = sc->phy.acquire(sc);
   11993 	if (rv != 0) {
   11994 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11995 		    __func__);
   11996 		return rv;
   11997 	}
   11998 
   11999 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12000 
   12001 	if (sc->sc_type == WM_T_80003)
   12002 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12003 	else
   12004 		sc->phy.release(sc);
   12005 
   12006 	return rv;
   12007 }
   12008 
   12009 static int
   12010 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12011 {
   12012 
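	/*
	 * Write the register offset with the read-enable bit set; the
	 * register contents show up in the low 16 bits of KUMCTRLSTA
	 * after a short delay.
	 */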
   12013 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12014 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12015 	    KUMCTRLSTA_REN);
   12016 	CSR_WRITE_FLUSH(sc);
   12017 	delay(2);
   12018 
   12019 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12020 
   12021 	return 0;
   12022 }
   12023 
   12024 /*
   12025  * wm_kmrn_writereg:
   12026  *
   12027  *	Write a kumeran register
   12028  */
   12029 static int
   12030 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12031 {
   12032 	int rv;
   12033 
   12034 	if (sc->sc_type == WM_T_80003)
   12035 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12036 	else
   12037 		rv = sc->phy.acquire(sc);
   12038 	if (rv != 0) {
   12039 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12040 		    __func__);
   12041 		return rv;
   12042 	}
   12043 
   12044 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12045 
   12046 	if (sc->sc_type == WM_T_80003)
   12047 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12048 	else
   12049 		sc->phy.release(sc);
   12050 
   12051 	return rv;
   12052 }
   12053 
   12054 static int
   12055 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12056 {
   12057 
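	/*
	 * Writes need no ready polling: the offset and the 16 bits of
	 * data go out in a single KUMCTRLSTA write.
	 */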
   12058 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12059 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12060 
   12061 	return 0;
   12062 }
   12063 
   12064 /*
   12065  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12066  * This access method is different from IEEE MMD.
   12067  */
   12068 static int
   12069 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12070 {
   12071 	struct wm_softc *sc = device_private(dev);
   12072 	int rv;
   12073 
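	/* Set the EMI address, then read or write the data register. */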
   12074 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12075 	if (rv != 0)
   12076 		return rv;
   12077 
   12078 	if (rd)
   12079 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12080 	else
   12081 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12082 	return rv;
   12083 }
   12084 
   12085 static int
   12086 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12087 {
   12088 
   12089 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12090 }
   12091 
   12092 static int
   12093 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12094 {
   12095 
   12096 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12097 }
   12098 
   12099 /* SGMII related */
   12100 
   12101 /*
   12102  * wm_sgmii_uses_mdio
   12103  *
   12104  * Check whether the transaction is to the internal PHY or the external
   12105  * MDIO interface. Return true if it's MDIO.
   12106  */
   12107 static bool
   12108 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12109 {
   12110 	uint32_t reg;
   12111 	bool ismdio = false;
   12112 
   12113 	switch (sc->sc_type) {
   12114 	case WM_T_82575:
   12115 	case WM_T_82576:
   12116 		reg = CSR_READ(sc, WMREG_MDIC);
   12117 		ismdio = ((reg & MDIC_DEST) != 0);
   12118 		break;
   12119 	case WM_T_82580:
   12120 	case WM_T_I350:
   12121 	case WM_T_I354:
   12122 	case WM_T_I210:
   12123 	case WM_T_I211:
   12124 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12125 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12126 		break;
   12127 	default:
   12128 		break;
   12129 	}
   12130 
   12131 	return ismdio;
   12132 }
   12133 
   12134 /* Setup internal SGMII PHY for SFP */
   12135 static void
   12136 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12137 {
   12138 	uint16_t id1, id2, phyreg;
   12139 	int i, rv;
   12140 
   12141 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12142 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12143 		return;
   12144 
   12145 	for (i = 0; i < MII_NPHY; i++) {
   12146 		sc->phy.no_errprint = true;
   12147 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12148 		if (rv != 0)
   12149 			continue;
   12150 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12151 		if (rv != 0)
   12152 			continue;
   12153 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12154 			continue;
   12155 		sc->phy.no_errprint = false;
   12156 
   12157 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12158 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12159 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12160 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12161 		break;
	}
}
   12165 
   12166 /*
   12167  * wm_sgmii_readreg:	[mii interface function]
   12168  *
   12169  *	Read a PHY register on the SGMII
   12170  * This could be handled by the PHY layer if we didn't have to lock the
   12171  * resource ...
   12172  */
   12173 static int
   12174 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12175 {
   12176 	struct wm_softc *sc = device_private(dev);
   12177 	int rv;
   12178 
   12179 	if (sc->phy.acquire(sc)) {
   12180 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12181 		return -1;
   12182 	}
   12183 
   12184 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12185 
   12186 	sc->phy.release(sc);
   12187 	return rv;
   12188 }
   12189 
   12190 static int
   12191 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12192 {
   12193 	struct wm_softc *sc = device_private(dev);
   12194 	uint32_t i2ccmd;
   12195 	int i, rv = 0;
   12196 
   12197 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12198 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12199 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12200 
   12201 	/* Poll the ready bit */
   12202 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12203 		delay(50);
   12204 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12205 		if (i2ccmd & I2CCMD_READY)
   12206 			break;
   12207 	}
   12208 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12209 		device_printf(dev, "I2CCMD Read did not complete\n");
   12210 		rv = ETIMEDOUT;
   12211 	}
   12212 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12213 		if (!sc->phy.no_errprint)
   12214 			device_printf(dev, "I2CCMD Error bit set\n");
   12215 		rv = EIO;
   12216 	}
   12217 
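	/* Swap the data bytes back; see wm_sgmii_writereg_locked(). */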
   12218 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12219 
   12220 	return rv;
   12221 }
   12222 
   12223 /*
   12224  * wm_sgmii_writereg:	[mii interface function]
   12225  *
   12226  *	Write a PHY register on the SGMII.
   12227  * This could be handled by the PHY layer if we didn't have to lock the
   12228  * resource ...
   12229  */
   12230 static int
   12231 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12232 {
   12233 	struct wm_softc *sc = device_private(dev);
   12234 	int rv;
   12235 
   12236 	if (sc->phy.acquire(sc) != 0) {
   12237 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12238 		return -1;
   12239 	}
   12240 
   12241 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12242 
   12243 	sc->phy.release(sc);
   12244 
   12245 	return rv;
   12246 }
   12247 
   12248 static int
   12249 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12250 {
   12251 	struct wm_softc *sc = device_private(dev);
   12252 	uint32_t i2ccmd;
   12253 	uint16_t swapdata;
   12254 	int rv = 0;
   12255 	int i;
   12256 
   12257 	/* Swap the data bytes for the I2C interface */
   12258 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12259 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12260 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12261 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12262 
   12263 	/* Poll the ready bit */
   12264 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12265 		delay(50);
   12266 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12267 		if (i2ccmd & I2CCMD_READY)
   12268 			break;
   12269 	}
   12270 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12271 		device_printf(dev, "I2CCMD Write did not complete\n");
   12272 		rv = ETIMEDOUT;
   12273 	}
   12274 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12275 		device_printf(dev, "I2CCMD Error bit set\n");
   12276 		rv = EIO;
   12277 	}
   12278 
   12279 	return rv;
   12280 }
   12281 
   12282 /* TBI related */
   12283 
   12284 static bool
   12285 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12286 {
   12287 	bool sig;
   12288 
   12289 	sig = ctrl & CTRL_SWDPIN(1);
   12290 
   12291 	/*
   12292 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12293 	 * detect a signal, 1 if they don't.
   12294 	 */
   12295 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12296 		sig = !sig;
   12297 
   12298 	return sig;
   12299 }
   12300 
   12301 /*
   12302  * wm_tbi_mediainit:
   12303  *
   12304  *	Initialize media for use on 1000BASE-X devices.
   12305  */
   12306 static void
   12307 wm_tbi_mediainit(struct wm_softc *sc)
   12308 {
   12309 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12310 	const char *sep = "";
   12311 
   12312 	if (sc->sc_type < WM_T_82543)
   12313 		sc->sc_tipg = TIPG_WM_DFLT;
   12314 	else
   12315 		sc->sc_tipg = TIPG_LG_DFLT;
   12316 
   12317 	sc->sc_tbi_serdes_anegticks = 5;
   12318 
   12319 	/* Initialize our media structures */
   12320 	sc->sc_mii.mii_ifp = ifp;
   12321 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12322 
   12323 	ifp->if_baudrate = IF_Gbps(1);
   12324 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12325 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12326 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12327 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12328 		    sc->sc_core_lock);
   12329 	} else {
   12330 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12331 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12332 	}
   12333 
   12334 	/*
   12335 	 * SWD Pins:
   12336 	 *
   12337 	 *	0 = Link LED (output)
   12338 	 *	1 = Loss Of Signal (input)
   12339 	 */
   12340 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12341 
   12342 	/* XXX Perhaps this is only for TBI */
   12343 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12344 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12345 
   12346 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12347 		sc->sc_ctrl &= ~CTRL_LRST;
   12348 
   12349 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12350 
   12351 #define	ADD(ss, mm, dd)							\
   12352 do {									\
   12353 	aprint_normal("%s%s", sep, ss);					\
   12354 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12355 	sep = ", ";							\
   12356 } while (/*CONSTCOND*/0)
   12357 
   12358 	aprint_normal_dev(sc->sc_dev, "");
   12359 
   12360 	if (sc->sc_type == WM_T_I354) {
   12361 		uint32_t status;
   12362 
   12363 		status = CSR_READ(sc, WMREG_STATUS);
   12364 		if (((status & STATUS_2P5_SKU) != 0)
   12365 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   12369 	} else if (sc->sc_type == WM_T_82545) {
   12370 		/* Only 82545 is LX (XXX except SFP) */
   12371 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12372 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12373 	} else if (sc->sc_sfptype != 0) {
   12374 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12375 		switch (sc->sc_sfptype) {
   12376 		default:
   12377 		case SFF_SFP_ETH_FLAGS_1000SX:
   12378 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12379 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12380 			break;
   12381 		case SFF_SFP_ETH_FLAGS_1000LX:
   12382 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12383 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12384 			break;
   12385 		case SFF_SFP_ETH_FLAGS_1000CX:
   12386 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12387 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12388 			break;
   12389 		case SFF_SFP_ETH_FLAGS_1000T:
   12390 			ADD("1000baseT", IFM_1000_T, 0);
   12391 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12392 			break;
   12393 		case SFF_SFP_ETH_FLAGS_100FX:
   12394 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12395 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12396 			break;
   12397 		}
   12398 	} else {
   12399 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12400 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12401 	}
   12402 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12403 	aprint_normal("\n");
   12404 
   12405 #undef ADD
   12406 
   12407 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12408 }
   12409 
   12410 /*
   12411  * wm_tbi_mediachange:	[ifmedia interface function]
   12412  *
   12413  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12414  */
   12415 static int
   12416 wm_tbi_mediachange(struct ifnet *ifp)
   12417 {
   12418 	struct wm_softc *sc = ifp->if_softc;
   12419 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12420 	uint32_t status, ctrl;
   12421 	bool signal;
   12422 	int i;
   12423 
   12424 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12425 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12426 		/* XXX need some work for >= 82571 and < 82575 */
   12427 		if (sc->sc_type < WM_T_82575)
   12428 			return 0;
   12429 	}
   12430 
   12431 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12432 	    || (sc->sc_type >= WM_T_82575))
   12433 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12434 
   12435 	sc->sc_ctrl &= ~CTRL_LRST;
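	/* Build the TX config word: autonegotiation plus our abilities. */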
   12436 	sc->sc_txcw = TXCW_ANE;
   12437 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12438 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12439 	else if (ife->ifm_media & IFM_FDX)
   12440 		sc->sc_txcw |= TXCW_FD;
   12441 	else
   12442 		sc->sc_txcw |= TXCW_HD;
   12443 
   12444 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12445 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12446 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12448 		device_xname(sc->sc_dev), sc->sc_txcw));
   12449 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12450 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12451 	CSR_WRITE_FLUSH(sc);
   12452 	delay(1000);
   12453 
   12454 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12455 	signal = wm_tbi_havesignal(sc, ctrl);
   12456 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
		device_xname(sc->sc_dev), signal));
   12459 
   12460 	if (signal) {
   12461 		/* Have signal; wait for the link to come up. */
   12462 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12463 			delay(10000);
   12464 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12465 				break;
   12466 		}
   12467 
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   12470 
   12471 		status = CSR_READ(sc, WMREG_STATUS);
   12472 		DPRINTF(sc, WM_DEBUG_LINK,
   12473 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12474 			device_xname(sc->sc_dev), status, STATUS_LU));
   12475 		if (status & STATUS_LU) {
   12476 			/* Link is up. */
   12477 			DPRINTF(sc, WM_DEBUG_LINK,
   12478 			    ("%s: LINK: set media -> link up %s\n",
   12479 				device_xname(sc->sc_dev),
   12480 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12481 
   12482 			/*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should update sc->sc_ctrl.
   12484 			 * so we should update sc->sc_ctrl
   12485 			 */
   12486 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12487 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12488 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12489 			if (status & STATUS_FD)
   12490 				sc->sc_tctl |=
   12491 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12492 			else
   12493 				sc->sc_tctl |=
   12494 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12495 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12496 				sc->sc_fcrtl |= FCRTL_XONE;
   12497 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12498 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12499 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12500 			sc->sc_tbi_linkup = 1;
   12501 		} else {
   12502 			if (i == WM_LINKUP_TIMEOUT)
   12503 				wm_check_for_link(sc);
   12504 			/* Link is down. */
   12505 			DPRINTF(sc, WM_DEBUG_LINK,
   12506 			    ("%s: LINK: set media -> link down\n",
   12507 				device_xname(sc->sc_dev)));
   12508 			sc->sc_tbi_linkup = 0;
   12509 		}
   12510 	} else {
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: LINK: set media -> no signal\n",
			device_xname(sc->sc_dev)));
   12513 		sc->sc_tbi_linkup = 0;
   12514 	}
   12515 
   12516 	wm_tbi_serdes_set_linkled(sc);
   12517 
   12518 	return 0;
   12519 }
   12520 
   12521 /*
   12522  * wm_tbi_mediastatus:	[ifmedia interface function]
   12523  *
   12524  *	Get the current interface media status on a 1000BASE-X device.
   12525  */
   12526 static void
   12527 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12528 {
   12529 	struct wm_softc *sc = ifp->if_softc;
   12530 	uint32_t ctrl, status;
   12531 
   12532 	ifmr->ifm_status = IFM_AVALID;
   12533 	ifmr->ifm_active = IFM_ETHER;
   12534 
   12535 	status = CSR_READ(sc, WMREG_STATUS);
   12536 	if ((status & STATUS_LU) == 0) {
   12537 		ifmr->ifm_active |= IFM_NONE;
   12538 		return;
   12539 	}
   12540 
   12541 	ifmr->ifm_status |= IFM_ACTIVE;
   12542 	/* Only 82545 is LX */
   12543 	if (sc->sc_type == WM_T_82545)
   12544 		ifmr->ifm_active |= IFM_1000_LX;
   12545 	else
   12546 		ifmr->ifm_active |= IFM_1000_SX;
   12547 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12548 		ifmr->ifm_active |= IFM_FDX;
   12549 	else
   12550 		ifmr->ifm_active |= IFM_HDX;
   12551 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12552 	if (ctrl & CTRL_RFCE)
   12553 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12554 	if (ctrl & CTRL_TFCE)
   12555 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12556 }
   12557 
   12558 /* XXX TBI only */
   12559 static int
   12560 wm_check_for_link(struct wm_softc *sc)
   12561 {
   12562 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12563 	uint32_t rxcw;
   12564 	uint32_t ctrl;
   12565 	uint32_t status;
   12566 	bool signal;
   12567 
   12568 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12569 		device_xname(sc->sc_dev), __func__));
   12570 
   12571 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12572 		/* XXX need some work for >= 82571 */
   12573 		if (sc->sc_type >= WM_T_82571) {
   12574 			sc->sc_tbi_linkup = 1;
   12575 			return 0;
   12576 		}
   12577 	}
   12578 
   12579 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12580 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12581 	status = CSR_READ(sc, WMREG_STATUS);
   12582 	signal = wm_tbi_havesignal(sc, ctrl);
   12583 
   12584 	DPRINTF(sc, WM_DEBUG_LINK,
   12585 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12586 		device_xname(sc->sc_dev), __func__, signal,
   12587 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12588 
   12589 	/*
   12590 	 * SWDPIN   LU RXCW
   12591 	 *	0    0	  0
   12592 	 *	0    0	  1	(should not happen)
   12593 	 *	0    1	  0	(should not happen)
   12594 	 *	0    1	  1	(should not happen)
   12595 	 *	1    0	  0	Disable autonego and force linkup
   12596 	 *	1    0	  1	got /C/ but not linkup yet
   12597 	 *	1    1	  0	(linkup)
   12598 	 *	1    1	  1	If IFM_AUTO, back to autonego
	 */
   12601 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12602 		DPRINTF(sc, WM_DEBUG_LINK,
   12603 		    ("%s: %s: force linkup and fullduplex\n",
   12604 			device_xname(sc->sc_dev), __func__));
   12605 		sc->sc_tbi_linkup = 0;
   12606 		/* Disable auto-negotiation in the TXCW register */
   12607 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12608 
   12609 		/*
   12610 		 * Force link-up and also force full-duplex.
   12611 		 *
		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
   12613 		 * so we should update sc->sc_ctrl
   12614 		 */
   12615 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12616 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12617 	} else if (((status & STATUS_LU) != 0)
   12618 	    && ((rxcw & RXCW_C) != 0)
   12619 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12620 		sc->sc_tbi_linkup = 1;
   12621 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12622 			device_xname(sc->sc_dev),
   12623 			__func__));
   12624 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12625 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12626 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
			device_xname(sc->sc_dev), __func__));
   12629 	} else {
   12630 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12631 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12632 			status));
   12633 	}
   12634 
   12635 	return 0;
   12636 }
   12637 
   12638 /*
   12639  * wm_tbi_tick:
   12640  *
   12641  *	Check the link on TBI devices.
   12642  *	This function acts as mii_tick().
   12643  */
   12644 static void
   12645 wm_tbi_tick(struct wm_softc *sc)
   12646 {
   12647 	struct mii_data *mii = &sc->sc_mii;
   12648 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12649 	uint32_t status;
   12650 
   12651 	KASSERT(WM_CORE_LOCKED(sc));
   12652 
   12653 	status = CSR_READ(sc, WMREG_STATUS);
   12654 
   12655 	/* XXX is this needed? */
   12656 	(void)CSR_READ(sc, WMREG_RXCW);
   12657 	(void)CSR_READ(sc, WMREG_CTRL);
   12658 
   12659 	/* set link status */
   12660 	if ((status & STATUS_LU) == 0) {
   12661 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12662 			device_xname(sc->sc_dev)));
   12663 		sc->sc_tbi_linkup = 0;
   12664 	} else if (sc->sc_tbi_linkup == 0) {
   12665 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12666 			device_xname(sc->sc_dev),
   12667 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12668 		sc->sc_tbi_linkup = 1;
   12669 		sc->sc_tbi_serdes_ticks = 0;
   12670 	}
   12671 
   12672 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12673 		goto setled;
   12674 
   12675 	if ((status & STATUS_LU) == 0) {
   12676 		sc->sc_tbi_linkup = 0;
   12677 		/* If the timer expired, retry autonegotiation */
   12678 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12679 		    && (++sc->sc_tbi_serdes_ticks
   12680 			>= sc->sc_tbi_serdes_anegticks)) {
   12681 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12682 				device_xname(sc->sc_dev), __func__));
   12683 			sc->sc_tbi_serdes_ticks = 0;
   12684 			/*
   12685 			 * Reset the link, and let autonegotiation do
   12686 			 * its thing
   12687 			 */
   12688 			sc->sc_ctrl |= CTRL_LRST;
   12689 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12690 			CSR_WRITE_FLUSH(sc);
   12691 			delay(1000);
   12692 			sc->sc_ctrl &= ~CTRL_LRST;
   12693 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12694 			CSR_WRITE_FLUSH(sc);
   12695 			delay(1000);
   12696 			CSR_WRITE(sc, WMREG_TXCW,
   12697 			    sc->sc_txcw & ~TXCW_ANE);
   12698 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12699 		}
   12700 	}
   12701 
   12702 setled:
   12703 	wm_tbi_serdes_set_linkled(sc);
   12704 }
   12705 
   12706 /* SERDES related */
   12707 static void
   12708 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12709 {
   12710 	uint32_t reg;
   12711 
   12712 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12713 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12714 		return;
   12715 
   12716 	/* Enable PCS to turn on link */
   12717 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12718 	reg |= PCS_CFG_PCS_EN;
   12719 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12720 
   12721 	/* Power up the laser */
   12722 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12723 	reg &= ~CTRL_EXT_SWDPIN(3);
   12724 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12725 
   12726 	/* Flush the write to verify completion */
   12727 	CSR_WRITE_FLUSH(sc);
   12728 	delay(1000);
   12729 }
   12730 
   12731 static int
   12732 wm_serdes_mediachange(struct ifnet *ifp)
   12733 {
   12734 	struct wm_softc *sc = ifp->if_softc;
   12735 	bool pcs_autoneg = true; /* XXX */
   12736 	uint32_t ctrl_ext, pcs_lctl, reg;
   12737 
   12738 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12739 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12740 		return 0;
   12741 
   12742 	/* XXX Currently, this function is not called on 8257[12] */
   12743 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12744 	    || (sc->sc_type >= WM_T_82575))
   12745 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12746 
    12747 	/* Power on the SFP cage if present */
   12748 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12749 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12750 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12751 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12752 
   12753 	sc->sc_ctrl |= CTRL_SLU;
   12754 
   12755 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12756 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12757 
   12758 		reg = CSR_READ(sc, WMREG_CONNSW);
   12759 		reg |= CONNSW_ENRGSRC;
   12760 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12761 	}
   12762 
   12763 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12764 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12765 	case CTRL_EXT_LINK_MODE_SGMII:
   12766 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12767 		pcs_autoneg = true;
   12768 		/* Autoneg time out should be disabled for SGMII mode */
   12769 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12770 		break;
   12771 	case CTRL_EXT_LINK_MODE_1000KX:
   12772 		pcs_autoneg = false;
   12773 		/* FALLTHROUGH */
   12774 	default:
   12775 		if ((sc->sc_type == WM_T_82575)
   12776 		    || (sc->sc_type == WM_T_82576)) {
   12777 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12778 				pcs_autoneg = false;
   12779 		}
   12780 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12781 		    | CTRL_FRCFDX;
   12782 
   12783 		/* Set speed of 1000/Full if speed/duplex is forced */
   12784 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12785 	}
   12786 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12787 
   12788 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12789 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12790 
   12791 	if (pcs_autoneg) {
   12792 		/* Set PCS register for autoneg */
   12793 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12794 
   12795 		/* Disable force flow control for autoneg */
   12796 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12797 
   12798 		/* Configure flow control advertisement for autoneg */
   12799 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12800 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12801 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12802 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12803 	} else
   12804 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12805 
   12806 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12807 
   12808 	return 0;
   12809 }
   12810 
   12811 static void
   12812 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12813 {
   12814 	struct wm_softc *sc = ifp->if_softc;
   12815 	struct mii_data *mii = &sc->sc_mii;
   12816 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12817 	uint32_t pcs_adv, pcs_lpab, reg;
   12818 
   12819 	ifmr->ifm_status = IFM_AVALID;
   12820 	ifmr->ifm_active = IFM_ETHER;
   12821 
   12822 	/* Check PCS */
   12823 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12824 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12825 		ifmr->ifm_active |= IFM_NONE;
   12826 		sc->sc_tbi_linkup = 0;
   12827 		goto setled;
   12828 	}
   12829 
   12830 	sc->sc_tbi_linkup = 1;
   12831 	ifmr->ifm_status |= IFM_ACTIVE;
   12832 	if (sc->sc_type == WM_T_I354) {
   12833 		uint32_t status;
   12834 
   12835 		status = CSR_READ(sc, WMREG_STATUS);
   12836 		if (((status & STATUS_2P5_SKU) != 0)
   12837 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12838 			ifmr->ifm_active |= IFM_2500_KX;
   12839 		} else
   12840 			ifmr->ifm_active |= IFM_1000_KX;
   12841 	} else {
   12842 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12843 		case PCS_LSTS_SPEED_10:
   12844 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12845 			break;
   12846 		case PCS_LSTS_SPEED_100:
   12847 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12848 			break;
   12849 		case PCS_LSTS_SPEED_1000:
   12850 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12851 			break;
   12852 		default:
   12853 			device_printf(sc->sc_dev, "Unknown speed\n");
   12854 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12855 			break;
   12856 		}
   12857 	}
   12858 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12859 	if ((reg & PCS_LSTS_FDX) != 0)
   12860 		ifmr->ifm_active |= IFM_FDX;
   12861 	else
   12862 		ifmr->ifm_active |= IFM_HDX;
   12863 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12864 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12865 		/* Check flow */
   12866 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12867 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12868 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12869 			goto setled;
   12870 		}
   12871 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12872 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12873 		DPRINTF(sc, WM_DEBUG_LINK,
   12874 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12875 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12876 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12877 			mii->mii_media_active |= IFM_FLOW
   12878 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12879 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12880 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12881 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12882 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12883 			mii->mii_media_active |= IFM_FLOW
   12884 			    | IFM_ETH_TXPAUSE;
   12885 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12886 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12887 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12888 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12889 			mii->mii_media_active |= IFM_FLOW
   12890 			    | IFM_ETH_RXPAUSE;
   12891 		}
   12892 	}
   12893 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12894 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12895 setled:
   12896 	wm_tbi_serdes_set_linkled(sc);
   12897 }
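
          /*
           * For reference, the three branches above implement the usual
           * 802.3 Annex 28B pause resolution on the PCS advertisement and
           * link-partner words (a sketch, not a full restatement of the
           * standard):
           *
           *	local SYM  local ASYM  partner SYM  partner ASYM  result
           *	    1          x           1            x         TX+RX pause
           *	    0          1           1            1         TX pause only
           *	    1          1           0            1         RX pause only
           */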
   12898 
   12899 /*
   12900  * wm_serdes_tick:
   12901  *
   12902  *	Check the link on serdes devices.
   12903  */
   12904 static void
   12905 wm_serdes_tick(struct wm_softc *sc)
   12906 {
   12907 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12908 	struct mii_data *mii = &sc->sc_mii;
   12909 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12910 	uint32_t reg;
   12911 
   12912 	KASSERT(WM_CORE_LOCKED(sc));
   12913 
   12914 	mii->mii_media_status = IFM_AVALID;
   12915 	mii->mii_media_active = IFM_ETHER;
   12916 
   12917 	/* Check PCS */
   12918 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12919 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12920 		mii->mii_media_status |= IFM_ACTIVE;
   12921 		sc->sc_tbi_linkup = 1;
   12922 		sc->sc_tbi_serdes_ticks = 0;
   12923 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12924 		if ((reg & PCS_LSTS_FDX) != 0)
   12925 			mii->mii_media_active |= IFM_FDX;
   12926 		else
   12927 			mii->mii_media_active |= IFM_HDX;
   12928 	} else {
   12929 		mii->mii_media_status |= IFM_NONE;
   12930 		sc->sc_tbi_linkup = 0;
   12931 		/* If the timer expired, retry autonegotiation */
   12932 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12933 		    && (++sc->sc_tbi_serdes_ticks
   12934 			>= sc->sc_tbi_serdes_anegticks)) {
   12935 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12936 				device_xname(sc->sc_dev), __func__));
   12937 			sc->sc_tbi_serdes_ticks = 0;
   12938 			/* XXX */
   12939 			wm_serdes_mediachange(ifp);
   12940 		}
   12941 	}
   12942 
   12943 	wm_tbi_serdes_set_linkled(sc);
   12944 }
   12945 
   12946 /* SFP related */
   12947 
   12948 static int
   12949 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12950 {
   12951 	uint32_t i2ccmd;
   12952 	int i;
   12953 
   12954 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12955 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12956 
   12957 	/* Poll the ready bit */
   12958 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12959 		delay(50);
   12960 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12961 		if (i2ccmd & I2CCMD_READY)
   12962 			break;
   12963 	}
   12964 	if ((i2ccmd & I2CCMD_READY) == 0)
   12965 		return -1;
   12966 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12967 		return -1;
   12968 
   12969 	*data = i2ccmd & 0x00ff;
   12970 
   12971 	return 0;
   12972 }
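
          /*
           * A minimal usage sketch of the helper above (the I2C interface
           * must already be enabled via CTRL_EXT_I2C_ENA, as
           * wm_sfp_get_media_type() below does before calling it):
           *
           *	uint8_t id;
           *
           *	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
           *		... id now holds byte 0 (identifier) of the module
           *		    EEPROM ...
           */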
   12973 
   12974 static uint32_t
   12975 wm_sfp_get_media_type(struct wm_softc *sc)
   12976 {
   12977 	uint32_t ctrl_ext;
   12978 	uint8_t val = 0;
   12979 	int timeout = 3;
   12980 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12981 	int rv = -1;
   12982 
   12983 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12984 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12985 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12986 	CSR_WRITE_FLUSH(sc);
   12987 
   12988 	/* Read SFP module data */
   12989 	while (timeout) {
   12990 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12991 		if (rv == 0)
   12992 			break;
   12993 		delay(100*1000); /* XXX too big */
   12994 		timeout--;
   12995 	}
   12996 	if (rv != 0)
   12997 		goto out;
   12998 
   12999 	switch (val) {
   13000 	case SFF_SFP_ID_SFF:
   13001 		aprint_normal_dev(sc->sc_dev,
   13002 		    "Module/Connector soldered to board\n");
   13003 		break;
   13004 	case SFF_SFP_ID_SFP:
   13005 		sc->sc_flags |= WM_F_SFP;
   13006 		break;
   13007 	case SFF_SFP_ID_UNKNOWN:
   13008 		goto out;
   13009 	default:
   13010 		break;
   13011 	}
   13012 
   13013 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13014 	if (rv != 0)
   13015 		goto out;
   13016 
   13017 	sc->sc_sfptype = val;
   13018 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13019 		mediatype = WM_MEDIATYPE_SERDES;
   13020 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13021 		sc->sc_flags |= WM_F_SGMII;
   13022 		mediatype = WM_MEDIATYPE_COPPER;
   13023 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13024 		sc->sc_flags |= WM_F_SGMII;
   13025 		mediatype = WM_MEDIATYPE_SERDES;
   13026 	} else {
   13027 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13028 		    __func__, sc->sc_sfptype);
   13029 		sc->sc_sfptype = 0; /* XXX unknown */
   13030 	}
   13031 
   13032 out:
   13033 	/* Restore I2C interface setting */
   13034 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13035 
   13036 	return mediatype;
   13037 }
   13038 
   13039 /*
   13040  * NVM related.
   13041  * Microwire, SPI (w/wo EERD) and Flash.
   13042  */
   13043 
    13044 /* Both SPI and Microwire */
   13045 
   13046 /*
   13047  * wm_eeprom_sendbits:
   13048  *
   13049  *	Send a series of bits to the EEPROM.
   13050  */
   13051 static void
   13052 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13053 {
   13054 	uint32_t reg;
   13055 	int x;
   13056 
   13057 	reg = CSR_READ(sc, WMREG_EECD);
   13058 
   13059 	for (x = nbits; x > 0; x--) {
   13060 		if (bits & (1U << (x - 1)))
   13061 			reg |= EECD_DI;
   13062 		else
   13063 			reg &= ~EECD_DI;
   13064 		CSR_WRITE(sc, WMREG_EECD, reg);
   13065 		CSR_WRITE_FLUSH(sc);
   13066 		delay(2);
   13067 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13068 		CSR_WRITE_FLUSH(sc);
   13069 		delay(2);
   13070 		CSR_WRITE(sc, WMREG_EECD, reg);
   13071 		CSR_WRITE_FLUSH(sc);
   13072 		delay(2);
   13073 	}
   13074 }
   13075 
   13076 /*
   13077  * wm_eeprom_recvbits:
   13078  *
   13079  *	Receive a series of bits from the EEPROM.
   13080  */
   13081 static void
   13082 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13083 {
   13084 	uint32_t reg, val;
   13085 	int x;
   13086 
   13087 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13088 
   13089 	val = 0;
   13090 	for (x = nbits; x > 0; x--) {
   13091 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13092 		CSR_WRITE_FLUSH(sc);
   13093 		delay(2);
   13094 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13095 			val |= (1U << (x - 1));
   13096 		CSR_WRITE(sc, WMREG_EECD, reg);
   13097 		CSR_WRITE_FLUSH(sc);
   13098 		delay(2);
   13099 	}
   13100 	*valp = val;
   13101 }
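
          /*
           * Together these two helpers implement a complete bit-banged
           * EEPROM transaction.  For example, a Microwire READ is the 3-bit
           * opcode, then the address, then 16 data bits clocked back in,
           * which is exactly what wm_nvm_read_uwire() below does:
           *
           *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
           *	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits);
           *	wm_eeprom_recvbits(sc, &val, 16);
           */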
   13102 
   13103 /* Microwire */
   13104 
   13105 /*
   13106  * wm_nvm_read_uwire:
   13107  *
   13108  *	Read a word from the EEPROM using the MicroWire protocol.
   13109  */
   13110 static int
   13111 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13112 {
   13113 	uint32_t reg, val;
   13114 	int i;
   13115 
   13116 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13117 		device_xname(sc->sc_dev), __func__));
   13118 
   13119 	if (sc->nvm.acquire(sc) != 0)
   13120 		return -1;
   13121 
   13122 	for (i = 0; i < wordcnt; i++) {
   13123 		/* Clear SK and DI. */
   13124 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13125 		CSR_WRITE(sc, WMREG_EECD, reg);
   13126 
   13127 		/*
   13128 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13129 		 * and Xen.
   13130 		 *
    13131 		 * We use this workaround only for the 82540 because
    13132 		 * qemu's e1000 acts as an 82540.
   13133 		 */
   13134 		if (sc->sc_type == WM_T_82540) {
   13135 			reg |= EECD_SK;
   13136 			CSR_WRITE(sc, WMREG_EECD, reg);
   13137 			reg &= ~EECD_SK;
   13138 			CSR_WRITE(sc, WMREG_EECD, reg);
   13139 			CSR_WRITE_FLUSH(sc);
   13140 			delay(2);
   13141 		}
   13142 		/* XXX: end of workaround */
   13143 
   13144 		/* Set CHIP SELECT. */
   13145 		reg |= EECD_CS;
   13146 		CSR_WRITE(sc, WMREG_EECD, reg);
   13147 		CSR_WRITE_FLUSH(sc);
   13148 		delay(2);
   13149 
   13150 		/* Shift in the READ command. */
   13151 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13152 
   13153 		/* Shift in address. */
   13154 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13155 
   13156 		/* Shift out the data. */
   13157 		wm_eeprom_recvbits(sc, &val, 16);
   13158 		data[i] = val & 0xffff;
   13159 
   13160 		/* Clear CHIP SELECT. */
   13161 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13162 		CSR_WRITE(sc, WMREG_EECD, reg);
   13163 		CSR_WRITE_FLUSH(sc);
   13164 		delay(2);
   13165 	}
   13166 
   13167 	sc->nvm.release(sc);
   13168 	return 0;
   13169 }
   13170 
   13171 /* SPI */
   13172 
   13173 /*
   13174  * Set SPI and FLASH related information from the EECD register.
   13175  * For 82541 and 82547, the word size is taken from EEPROM.
   13176  */
   13177 static int
   13178 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13179 {
   13180 	int size;
   13181 	uint32_t reg;
   13182 	uint16_t data;
   13183 
   13184 	reg = CSR_READ(sc, WMREG_EECD);
   13185 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13186 
   13187 	/* Read the size of NVM from EECD by default */
   13188 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13189 	switch (sc->sc_type) {
   13190 	case WM_T_82541:
   13191 	case WM_T_82541_2:
   13192 	case WM_T_82547:
   13193 	case WM_T_82547_2:
   13194 		/* Set dummy value to access EEPROM */
   13195 		sc->sc_nvm_wordsize = 64;
   13196 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13197 			aprint_error_dev(sc->sc_dev,
   13198 			    "%s: failed to read EEPROM size\n", __func__);
   13199 		}
   13200 		reg = data;
   13201 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13202 		if (size == 0)
   13203 			size = 6; /* 64 word size */
   13204 		else
   13205 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13206 		break;
   13207 	case WM_T_80003:
   13208 	case WM_T_82571:
   13209 	case WM_T_82572:
   13210 	case WM_T_82573: /* SPI case */
   13211 	case WM_T_82574: /* SPI case */
   13212 	case WM_T_82583: /* SPI case */
   13213 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13214 		if (size > 14)
   13215 			size = 14;
   13216 		break;
   13217 	case WM_T_82575:
   13218 	case WM_T_82576:
   13219 	case WM_T_82580:
   13220 	case WM_T_I350:
   13221 	case WM_T_I354:
   13222 	case WM_T_I210:
   13223 	case WM_T_I211:
   13224 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13225 		if (size > 15)
   13226 			size = 15;
   13227 		break;
   13228 	default:
   13229 		aprint_error_dev(sc->sc_dev,
   13230 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13231 		return -1;
   13233 	}
   13234 
   13235 	sc->sc_nvm_wordsize = 1 << size;
   13236 
   13237 	return 0;
   13238 }
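
          /*
           * Worked example of the size computation above, assuming
           * NVM_WORD_SIZE_BASE_SHIFT is 6 as in other e1000-derived
           * drivers: an 82571 with the EECD_EE_SIZE_EX field reading 2
           * gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
           * words (512 bytes).
           */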
   13239 
   13240 /*
   13241  * wm_nvm_ready_spi:
   13242  *
   13243  *	Wait for a SPI EEPROM to be ready for commands.
   13244  */
   13245 static int
   13246 wm_nvm_ready_spi(struct wm_softc *sc)
   13247 {
   13248 	uint32_t val;
   13249 	int usec;
   13250 
   13251 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13252 		device_xname(sc->sc_dev), __func__));
   13253 
   13254 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13255 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13256 		wm_eeprom_recvbits(sc, &val, 8);
   13257 		if ((val & SPI_SR_RDY) == 0)
   13258 			break;
   13259 	}
   13260 	if (usec >= SPI_MAX_RETRIES) {
    13261 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13262 		return -1;
   13263 	}
   13264 	return 0;
   13265 }
   13266 
   13267 /*
   13268  * wm_nvm_read_spi:
   13269  *
    13270  *	Read a word from the EEPROM using the SPI protocol.
   13271  */
   13272 static int
   13273 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13274 {
   13275 	uint32_t reg, val;
   13276 	int i;
   13277 	uint8_t opc;
   13278 	int rv = 0;
   13279 
   13280 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13281 		device_xname(sc->sc_dev), __func__));
   13282 
   13283 	if (sc->nvm.acquire(sc) != 0)
   13284 		return -1;
   13285 
   13286 	/* Clear SK and CS. */
   13287 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13288 	CSR_WRITE(sc, WMREG_EECD, reg);
   13289 	CSR_WRITE_FLUSH(sc);
   13290 	delay(2);
   13291 
   13292 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13293 		goto out;
   13294 
   13295 	/* Toggle CS to flush commands. */
   13296 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13297 	CSR_WRITE_FLUSH(sc);
   13298 	delay(2);
   13299 	CSR_WRITE(sc, WMREG_EECD, reg);
   13300 	CSR_WRITE_FLUSH(sc);
   13301 	delay(2);
   13302 
   13303 	opc = SPI_OPC_READ;
   13304 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13305 		opc |= SPI_OPC_A8;
   13306 
   13307 	wm_eeprom_sendbits(sc, opc, 8);
   13308 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13309 
   13310 	for (i = 0; i < wordcnt; i++) {
   13311 		wm_eeprom_recvbits(sc, &val, 16);
   13312 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13313 	}
   13314 
   13315 	/* Raise CS and clear SK. */
   13316 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13317 	CSR_WRITE(sc, WMREG_EECD, reg);
   13318 	CSR_WRITE_FLUSH(sc);
   13319 	delay(2);
   13320 
   13321 out:
   13322 	sc->nvm.release(sc);
   13323 	return rv;
   13324 }
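
          /*
           * Note the byte swap in the loop above: the SPI part shifts each
           * 16-bit word out MSB first, so a raw 0x1234 from the wire is
           * stored as 0x3412 to match the word order the other NVM read
           * paths return.
           */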
   13325 
   13326 /* Using with EERD */
   13327 
   13328 static int
   13329 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13330 {
   13331 	uint32_t attempts = 100000;
   13332 	uint32_t i, reg = 0;
   13333 	int32_t done = -1;
   13334 
   13335 	for (i = 0; i < attempts; i++) {
   13336 		reg = CSR_READ(sc, rw);
   13337 
   13338 		if (reg & EERD_DONE) {
   13339 			done = 0;
   13340 			break;
   13341 		}
   13342 		delay(5);
   13343 	}
   13344 
   13345 	return done;
   13346 }
   13347 
   13348 static int
   13349 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13350 {
   13351 	int i, eerd = 0;
   13352 	int rv = 0;
   13353 
   13354 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13355 		device_xname(sc->sc_dev), __func__));
   13356 
   13357 	if (sc->nvm.acquire(sc) != 0)
   13358 		return -1;
   13359 
   13360 	for (i = 0; i < wordcnt; i++) {
   13361 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13362 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13363 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13364 		if (rv != 0) {
    13365 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13366 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13367 			break;
   13368 		}
   13369 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13370 	}
   13371 
   13372 	sc->nvm.release(sc);
   13373 	return rv;
   13374 }
   13375 
   13376 /* Flash */
   13377 
   13378 static int
   13379 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13380 {
   13381 	uint32_t eecd;
   13382 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13383 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13384 	uint32_t nvm_dword = 0;
   13385 	uint8_t sig_byte = 0;
   13386 	int rv;
   13387 
   13388 	switch (sc->sc_type) {
   13389 	case WM_T_PCH_SPT:
   13390 	case WM_T_PCH_CNP:
   13391 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13392 		act_offset = ICH_NVM_SIG_WORD * 2;
   13393 
   13394 		/* Set bank to 0 in case flash read fails. */
   13395 		*bank = 0;
   13396 
   13397 		/* Check bank 0 */
   13398 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13399 		if (rv != 0)
   13400 			return rv;
   13401 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13402 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13403 			*bank = 0;
   13404 			return 0;
   13405 		}
   13406 
   13407 		/* Check bank 1 */
    13408 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13409 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
   13410 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13411 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13412 			*bank = 1;
   13413 			return 0;
   13414 		}
   13415 		aprint_error_dev(sc->sc_dev,
   13416 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13417 		return -1;
   13418 	case WM_T_ICH8:
   13419 	case WM_T_ICH9:
   13420 		eecd = CSR_READ(sc, WMREG_EECD);
   13421 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13422 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13423 			return 0;
   13424 		}
   13425 		/* FALLTHROUGH */
   13426 	default:
   13427 		/* Default to 0 */
   13428 		*bank = 0;
   13429 
   13430 		/* Check bank 0 */
   13431 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13432 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13433 			*bank = 0;
   13434 			return 0;
   13435 		}
   13436 
   13437 		/* Check bank 1 */
   13438 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13439 		    &sig_byte);
   13440 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13441 			*bank = 1;
   13442 			return 0;
   13443 		}
   13444 	}
   13445 
   13446 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13447 		device_xname(sc->sc_dev)));
   13448 	return -1;
   13449 }
   13450 
   13451 /******************************************************************************
   13452  * This function does initial flash setup so that a new read/write/erase cycle
   13453  * can be started.
   13454  *
   13455  * sc - The pointer to the hw structure
   13456  ****************************************************************************/
   13457 static int32_t
   13458 wm_ich8_cycle_init(struct wm_softc *sc)
   13459 {
   13460 	uint16_t hsfsts;
   13461 	int32_t error = 1;
   13462 	int32_t i     = 0;
   13463 
   13464 	if (sc->sc_type >= WM_T_PCH_SPT)
   13465 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13466 	else
   13467 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13468 
    13469 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   13470 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13471 		return error;
   13472 
    13473 	/* Clear FCERR and DAEL in HW status by writing 1s (both are W1C) */
   13475 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13476 
   13477 	if (sc->sc_type >= WM_T_PCH_SPT)
   13478 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13479 	else
   13480 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13481 
    13482 	/*
    13483 	 * Either we should have a hardware SPI "cycle in progress" bit to
    13484 	 * check against before starting a new cycle, or the FDONE bit
    13485 	 * should be changed in the hardware so that it reads 1 after a
    13486 	 * hardware reset, which could then indicate whether a cycle is in
    13487 	 * progress or has been completed.  We should also have some
    13488 	 * software semaphore mechanism to guard FDONE or the cycle in
    13489 	 * progress bit so that accesses by two threads are serialized and
    13490 	 * two threads don't start a cycle at the same time.
    13491 	 */
   13492 
   13493 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13494 		/*
   13495 		 * There is no cycle running at present, so we can start a
   13496 		 * cycle
   13497 		 */
   13498 
   13499 		/* Begin by setting Flash Cycle Done. */
   13500 		hsfsts |= HSFSTS_DONE;
   13501 		if (sc->sc_type >= WM_T_PCH_SPT)
   13502 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13503 			    hsfsts & 0xffffUL);
   13504 		else
   13505 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13506 		error = 0;
   13507 	} else {
   13508 		/*
    13509 		 * Otherwise poll for some time so the current cycle has a
   13510 		 * chance to end before giving up.
   13511 		 */
   13512 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13513 			if (sc->sc_type >= WM_T_PCH_SPT)
   13514 				hsfsts = ICH8_FLASH_READ32(sc,
   13515 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13516 			else
   13517 				hsfsts = ICH8_FLASH_READ16(sc,
   13518 				    ICH_FLASH_HSFSTS);
   13519 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13520 				error = 0;
   13521 				break;
   13522 			}
   13523 			delay(1);
   13524 		}
   13525 		if (error == 0) {
    13526 			/*
    13527 			 * The previous cycle completed within the timeout,
    13528 			 * so now set the Flash Cycle Done bit.
    13529 			 */
   13530 			hsfsts |= HSFSTS_DONE;
   13531 			if (sc->sc_type >= WM_T_PCH_SPT)
   13532 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13533 				    hsfsts & 0xffffUL);
   13534 			else
   13535 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13536 				    hsfsts);
   13537 		}
   13538 	}
   13539 	return error;
   13540 }
   13541 
   13542 /******************************************************************************
   13543  * This function starts a flash cycle and waits for its completion
   13544  *
   13545  * sc - The pointer to the hw structure
   13546  ****************************************************************************/
   13547 static int32_t
   13548 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13549 {
   13550 	uint16_t hsflctl;
   13551 	uint16_t hsfsts;
   13552 	int32_t error = 1;
   13553 	uint32_t i = 0;
   13554 
   13555 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13556 	if (sc->sc_type >= WM_T_PCH_SPT)
   13557 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13558 	else
   13559 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13560 	hsflctl |= HSFCTL_GO;
   13561 	if (sc->sc_type >= WM_T_PCH_SPT)
   13562 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13563 		    (uint32_t)hsflctl << 16);
   13564 	else
   13565 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13566 
   13567 	/* Wait till FDONE bit is set to 1 */
   13568 	do {
   13569 		if (sc->sc_type >= WM_T_PCH_SPT)
   13570 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13571 			    & 0xffffUL;
   13572 		else
   13573 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13574 		if (hsfsts & HSFSTS_DONE)
   13575 			break;
   13576 		delay(1);
   13577 		i++;
   13578 	} while (i < timeout);
    13579 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13580 		error = 0;
   13581 
   13582 	return error;
   13583 }
   13584 
   13585 /******************************************************************************
   13586  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13587  *
   13588  * sc - The pointer to the hw structure
   13589  * index - The index of the byte or word to read.
   13590  * size - Size of data to read, 1=byte 2=word, 4=dword
   13591  * data - Pointer to the word to store the value read.
   13592  *****************************************************************************/
   13593 static int32_t
   13594 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13595     uint32_t size, uint32_t *data)
   13596 {
   13597 	uint16_t hsfsts;
   13598 	uint16_t hsflctl;
   13599 	uint32_t flash_linear_address;
   13600 	uint32_t flash_data = 0;
   13601 	int32_t error = 1;
   13602 	int32_t count = 0;
   13603 
    13604 	if (size < 1 || size > 4 || data == NULL ||
   13605 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13606 		return error;
   13607 
   13608 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13609 	    sc->sc_ich8_flash_base;
   13610 
   13611 	do {
   13612 		delay(1);
   13613 		/* Steps */
   13614 		error = wm_ich8_cycle_init(sc);
   13615 		if (error)
   13616 			break;
   13617 
   13618 		if (sc->sc_type >= WM_T_PCH_SPT)
   13619 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13620 			    >> 16;
   13621 		else
   13622 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13623 		/* The bcount field is size - 1: 0/1/3 for 1/2/4 bytes. */
   13624 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13625 		    & HSFCTL_BCOUNT_MASK;
   13626 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13627 		if (sc->sc_type >= WM_T_PCH_SPT) {
    13628 			/*
    13629 			 * In SPT, this register is in LAN memory space, not
    13630 			 * flash.  Therefore, only 32 bit access is supported.
    13631 			 */
   13632 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13633 			    (uint32_t)hsflctl << 16);
   13634 		} else
   13635 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13636 
   13637 		/*
   13638 		 * Write the last 24 bits of index into Flash Linear address
   13639 		 * field in Flash Address
   13640 		 */
   13641 		/* TODO: TBD maybe check the index against the size of flash */
   13642 
   13643 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13644 
   13645 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13646 
    13647 		/*
    13648 		 * Check if FCERR is set to 1.  If so, clear it and retry
    13649 		 * the whole sequence a few more times; otherwise read in
    13650 		 * (shift in) the Flash Data0, least significant byte
    13651 		 * first.
    13652 		 */
   13653 		if (error == 0) {
   13654 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13655 			if (size == 1)
   13656 				*data = (uint8_t)(flash_data & 0x000000FF);
   13657 			else if (size == 2)
   13658 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13659 			else if (size == 4)
   13660 				*data = (uint32_t)flash_data;
   13661 			break;
   13662 		} else {
   13663 			/*
   13664 			 * If we've gotten here, then things are probably
   13665 			 * completely hosed, but if the error condition is
   13666 			 * detected, it won't hurt to give it another try...
   13667 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13668 			 */
   13669 			if (sc->sc_type >= WM_T_PCH_SPT)
   13670 				hsfsts = ICH8_FLASH_READ32(sc,
   13671 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13672 			else
   13673 				hsfsts = ICH8_FLASH_READ16(sc,
   13674 				    ICH_FLASH_HSFSTS);
   13675 
   13676 			if (hsfsts & HSFSTS_ERR) {
   13677 				/* Repeat for some time before giving up. */
   13678 				continue;
   13679 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13680 				break;
   13681 		}
   13682 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13683 
   13684 	return error;
   13685 }
   13686 
   13687 /******************************************************************************
   13688  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13689  *
   13690  * sc - pointer to wm_hw structure
   13691  * index - The index of the byte to read.
   13692  * data - Pointer to a byte to store the value read.
   13693  *****************************************************************************/
   13694 static int32_t
   13695 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13696 {
   13697 	int32_t status;
   13698 	uint32_t word = 0;
   13699 
   13700 	status = wm_read_ich8_data(sc, index, 1, &word);
   13701 	if (status == 0)
   13702 		*data = (uint8_t)word;
   13703 	else
   13704 		*data = 0;
   13705 
   13706 	return status;
   13707 }
   13708 
   13709 /******************************************************************************
   13710  * Reads a word from the NVM using the ICH8 flash access registers.
   13711  *
   13712  * sc - pointer to wm_hw structure
   13713  * index - The starting byte index of the word to read.
   13714  * data - Pointer to a word to store the value read.
   13715  *****************************************************************************/
   13716 static int32_t
   13717 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13718 {
   13719 	int32_t status;
   13720 	uint32_t word = 0;
   13721 
   13722 	status = wm_read_ich8_data(sc, index, 2, &word);
   13723 	if (status == 0)
   13724 		*data = (uint16_t)word;
   13725 	else
   13726 		*data = 0;
   13727 
   13728 	return status;
   13729 }
   13730 
   13731 /******************************************************************************
   13732  * Reads a dword from the NVM using the ICH8 flash access registers.
   13733  *
   13734  * sc - pointer to wm_hw structure
   13735  * index - The starting byte index of the word to read.
   13736  * data - Pointer to a word to store the value read.
   13737  *****************************************************************************/
   13738 static int32_t
   13739 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13740 {
   13741 	int32_t status;
   13742 
   13743 	status = wm_read_ich8_data(sc, index, 4, data);
   13744 	return status;
   13745 }
   13746 
   13747 /******************************************************************************
   13748  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13749  * register.
   13750  *
   13751  * sc - Struct containing variables accessed by shared code
   13752  * offset - offset of word in the EEPROM to read
   13753  * data - word read from the EEPROM
   13754  * words - number of words to read
   13755  *****************************************************************************/
   13756 static int
   13757 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13758 {
   13759 	int32_t	 rv = 0;
   13760 	uint32_t flash_bank = 0;
   13761 	uint32_t act_offset = 0;
   13762 	uint32_t bank_offset = 0;
   13763 	uint16_t word = 0;
   13764 	uint16_t i = 0;
   13765 
   13766 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13767 		device_xname(sc->sc_dev), __func__));
   13768 
   13769 	if (sc->nvm.acquire(sc) != 0)
   13770 		return -1;
   13771 
   13772 	/*
   13773 	 * We need to know which is the valid flash bank.  In the event
   13774 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13775 	 * managing flash_bank. So it cannot be trusted and needs
   13776 	 * to be updated with each read.
   13777 	 */
   13778 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13779 	if (rv) {
   13780 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13781 			device_xname(sc->sc_dev)));
   13782 		flash_bank = 0;
   13783 	}
   13784 
   13785 	/*
   13786 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13787 	 * size
   13788 	 */
   13789 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13790 
   13791 	for (i = 0; i < words; i++) {
   13792 		/* The NVM part needs a byte offset, hence * 2 */
   13793 		act_offset = bank_offset + ((offset + i) * 2);
   13794 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13795 		if (rv) {
   13796 			aprint_error_dev(sc->sc_dev,
   13797 			    "%s: failed to read NVM\n", __func__);
   13798 			break;
   13799 		}
   13800 		data[i] = word;
   13801 	}
   13802 
   13803 	sc->nvm.release(sc);
   13804 	return rv;
   13805 }
   13806 
   13807 /******************************************************************************
   13808  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13809  * register.
   13810  *
   13811  * sc - Struct containing variables accessed by shared code
   13812  * offset - offset of word in the EEPROM to read
   13813  * data - word read from the EEPROM
   13814  * words - number of words to read
   13815  *****************************************************************************/
   13816 static int
   13817 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13818 {
   13819 	int32_t	 rv = 0;
   13820 	uint32_t flash_bank = 0;
   13821 	uint32_t act_offset = 0;
   13822 	uint32_t bank_offset = 0;
   13823 	uint32_t dword = 0;
   13824 	uint16_t i = 0;
   13825 
   13826 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13827 		device_xname(sc->sc_dev), __func__));
   13828 
   13829 	if (sc->nvm.acquire(sc) != 0)
   13830 		return -1;
   13831 
   13832 	/*
   13833 	 * We need to know which is the valid flash bank.  In the event
   13834 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13835 	 * managing flash_bank. So it cannot be trusted and needs
   13836 	 * to be updated with each read.
   13837 	 */
   13838 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13839 	if (rv) {
   13840 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13841 			device_xname(sc->sc_dev)));
   13842 		flash_bank = 0;
   13843 	}
   13844 
   13845 	/*
   13846 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13847 	 * size
   13848 	 */
   13849 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13850 
   13851 	for (i = 0; i < words; i++) {
   13852 		/* The NVM part needs a byte offset, hence * 2 */
   13853 		act_offset = bank_offset + ((offset + i) * 2);
   13854 		/* but we must read dword aligned, so mask ... */
   13855 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13856 		if (rv) {
   13857 			aprint_error_dev(sc->sc_dev,
   13858 			    "%s: failed to read NVM\n", __func__);
   13859 			break;
   13860 		}
   13861 		/* ... and pick out low or high word */
   13862 		if ((act_offset & 0x2) == 0)
   13863 			data[i] = (uint16_t)(dword & 0xFFFF);
   13864 		else
   13865 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13866 	}
   13867 
   13868 	sc->nvm.release(sc);
   13869 	return rv;
   13870 }
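
          /*
           * Example of the alignment handling above: reading word offset 3
           * from bank 0 gives act_offset 6; the dword at byte offset 4 is
           * fetched (act_offset & ~0x3) and, since act_offset & 0x2 is set,
           * the high 16 bits (bytes 6-7) are returned.
           */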
   13871 
   13872 /* iNVM */
   13873 
   13874 static int
   13875 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13876 {
   13877 	int32_t	 rv = 0;
   13878 	uint32_t invm_dword;
   13879 	uint16_t i;
   13880 	uint8_t record_type, word_address;
   13881 
   13882 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13883 		device_xname(sc->sc_dev), __func__));
   13884 
   13885 	for (i = 0; i < INVM_SIZE; i++) {
   13886 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13887 		/* Get record type */
   13888 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13889 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13890 			break;
   13891 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13892 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13893 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13894 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13895 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13896 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13897 			if (word_address == address) {
   13898 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13899 				rv = 0;
   13900 				break;
   13901 			}
   13902 		}
   13903 	}
   13904 
   13905 	return rv;
   13906 }
   13907 
   13908 static int
   13909 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13910 {
   13911 	int rv = 0;
   13912 	int i;
   13913 
   13914 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13915 		device_xname(sc->sc_dev), __func__));
   13916 
   13917 	if (sc->nvm.acquire(sc) != 0)
   13918 		return -1;
   13919 
   13920 	for (i = 0; i < words; i++) {
   13921 		switch (offset + i) {
   13922 		case NVM_OFF_MACADDR:
   13923 		case NVM_OFF_MACADDR1:
   13924 		case NVM_OFF_MACADDR2:
   13925 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13926 			if (rv != 0) {
   13927 				data[i] = 0xffff;
   13928 				rv = -1;
   13929 			}
   13930 			break;
   13931 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13932 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13933 			if (rv != 0) {
   13934 				*data = INVM_DEFAULT_AL;
   13935 				rv = 0;
   13936 			}
   13937 			break;
   13938 		case NVM_OFF_CFG2:
   13939 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13940 			if (rv != 0) {
   13941 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13942 				rv = 0;
   13943 			}
   13944 			break;
   13945 		case NVM_OFF_CFG4:
   13946 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13947 			if (rv != 0) {
   13948 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13949 				rv = 0;
   13950 			}
   13951 			break;
   13952 		case NVM_OFF_LED_1_CFG:
   13953 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13954 			if (rv != 0) {
   13955 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13956 				rv = 0;
   13957 			}
   13958 			break;
   13959 		case NVM_OFF_LED_0_2_CFG:
   13960 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13961 			if (rv != 0) {
   13962 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13963 				rv = 0;
   13964 			}
   13965 			break;
   13966 		case NVM_OFF_ID_LED_SETTINGS:
   13967 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13968 			if (rv != 0) {
   13969 				*data = ID_LED_RESERVED_FFFF;
   13970 				rv = 0;
   13971 			}
   13972 			break;
   13973 		default:
   13974 			DPRINTF(sc, WM_DEBUG_NVM,
   13975 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13976 			*data = NVM_RESERVED_WORD;
   13977 			break;
   13978 		}
   13979 	}
   13980 
   13981 	sc->nvm.release(sc);
   13982 	return rv;
   13983 }
   13984 
    13985 /* Locking, NVM type detection, checksum validation, version and read */
   13986 
   13987 static int
   13988 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13989 {
   13990 	uint32_t eecd = 0;
   13991 
   13992 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13993 	    || sc->sc_type == WM_T_82583) {
   13994 		eecd = CSR_READ(sc, WMREG_EECD);
   13995 
   13996 		/* Isolate bits 15 & 16 */
   13997 		eecd = ((eecd >> 15) & 0x03);
   13998 
   13999 		/* If both bits are set, device is Flash type */
   14000 		if (eecd == 0x03)
   14001 			return 0;
   14002 	}
   14003 	return 1;
   14004 }
   14005 
   14006 static int
   14007 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14008 {
   14009 	uint32_t eec;
   14010 
   14011 	eec = CSR_READ(sc, WMREG_EEC);
   14012 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14013 		return 1;
   14014 
   14015 	return 0;
   14016 }
   14017 
   14018 /*
   14019  * wm_nvm_validate_checksum
   14020  *
   14021  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14022  */
   14023 static int
   14024 wm_nvm_validate_checksum(struct wm_softc *sc)
   14025 {
   14026 	uint16_t checksum;
   14027 	uint16_t eeprom_data;
   14028 #ifdef WM_DEBUG
   14029 	uint16_t csum_wordaddr, valid_checksum;
   14030 #endif
   14031 	int i;
   14032 
   14033 	checksum = 0;
   14034 
   14035 	/* Don't check for I211 */
   14036 	if (sc->sc_type == WM_T_I211)
   14037 		return 0;
   14038 
   14039 #ifdef WM_DEBUG
   14040 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14041 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14042 		csum_wordaddr = NVM_OFF_COMPAT;
   14043 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14044 	} else {
   14045 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14046 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14047 	}
   14048 
   14049 	/* Dump EEPROM image for debug */
   14050 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14051 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14052 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14053 		/* XXX PCH_SPT? */
   14054 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14055 		if ((eeprom_data & valid_checksum) == 0)
   14056 			DPRINTF(sc, WM_DEBUG_NVM,
   14057 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14058 				device_xname(sc->sc_dev), eeprom_data,
   14059 				    valid_checksum));
   14060 	}
   14061 
   14062 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14063 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14064 		for (i = 0; i < NVM_SIZE; i++) {
   14065 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14066 				printf("XXXX ");
   14067 			else
   14068 				printf("%04hx ", eeprom_data);
   14069 			if (i % 8 == 7)
   14070 				printf("\n");
   14071 		}
   14072 	}
   14073 
   14074 #endif /* WM_DEBUG */
   14075 
   14076 	for (i = 0; i < NVM_SIZE; i++) {
   14077 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14078 			return 1;
   14079 		checksum += eeprom_data;
   14080 	}
   14081 
   14082 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14083 #ifdef WM_DEBUG
   14084 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14085 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14086 #endif
   14087 	}
   14088 
   14089 	return 0;
   14090 }
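
          /*
           * Worked example: on e1000-family parts NVM_CHECKSUM is 0xbaba,
           * so the checksum word (typically the last of the 64, at offset
           * 0x3f) is chosen by the image tools such that
           *
           *	(uint16_t)(word[0x00] + word[0x01] + ... + word[0x3f])
           *	    == 0xbaba
           *
           * which is what the summation loop above verifies.
           */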
   14091 
   14092 static void
   14093 wm_nvm_version_invm(struct wm_softc *sc)
   14094 {
   14095 	uint32_t dword;
   14096 
   14097 	/*
    14098 	 * Linux's code to decode the version is very strange, so we don't
    14099 	 * follow that algorithm and just use word 61 as the document
    14100 	 * describes.  Perhaps it's not perfect though...
   14101 	 *
   14102 	 * Example:
   14103 	 *
   14104 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14105 	 */
   14106 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14107 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14108 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14109 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14110 }
   14111 
   14112 static void
   14113 wm_nvm_version(struct wm_softc *sc)
   14114 {
   14115 	uint16_t major, minor, build, patch;
   14116 	uint16_t uid0, uid1;
   14117 	uint16_t nvm_data;
   14118 	uint16_t off;
   14119 	bool check_version = false;
   14120 	bool check_optionrom = false;
   14121 	bool have_build = false;
   14122 	bool have_uid = true;
   14123 
   14124 	/*
   14125 	 * Version format:
   14126 	 *
   14127 	 * XYYZ
   14128 	 * X0YZ
   14129 	 * X0YY
   14130 	 *
   14131 	 * Example:
   14132 	 *
   14133 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14134 	 *	82571	0x50a6	5.10.6?
   14135 	 *	82572	0x506a	5.6.10?
   14136 	 *	82572EI	0x5069	5.6.9?
   14137 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14138 	 *		0x2013	2.1.3?
   14139 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14140 	 * ICH8+82567	0x0040	0.4.0?
   14141 	 * ICH9+82566	0x1040	1.4.0?
   14142 	 *ICH10+82567	0x0043	0.4.3?
   14143 	 *  PCH+82577	0x00c1	0.12.1?
   14144 	 * PCH2+82579	0x00d3	0.13.3?
   14145 	 *		0x00d4	0.13.4?
   14146 	 *  LPT+I218	0x0023	0.2.3?
   14147 	 *  SPT+I219	0x0084	0.8.4?
   14148 	 *  CNP+I219	0x0054	0.5.4?
   14149 	 */
   14150 
   14151 	/*
   14152 	 * XXX
   14153 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14154 	 * I've never seen on real 82574 hardware with such small SPI ROM.
   14155 	 */
   14156 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14157 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14158 		have_uid = false;
   14159 
   14160 	switch (sc->sc_type) {
   14161 	case WM_T_82571:
   14162 	case WM_T_82572:
   14163 	case WM_T_82574:
   14164 	case WM_T_82583:
   14165 		check_version = true;
   14166 		check_optionrom = true;
   14167 		have_build = true;
   14168 		break;
   14169 	case WM_T_ICH8:
   14170 	case WM_T_ICH9:
   14171 	case WM_T_ICH10:
   14172 	case WM_T_PCH:
   14173 	case WM_T_PCH2:
   14174 	case WM_T_PCH_LPT:
   14175 	case WM_T_PCH_SPT:
   14176 	case WM_T_PCH_CNP:
   14177 		check_version = true;
   14178 		have_build = true;
   14179 		have_uid = false;
   14180 		break;
   14181 	case WM_T_82575:
   14182 	case WM_T_82576:
   14183 	case WM_T_82580:
   14184 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14185 			check_version = true;
   14186 		break;
   14187 	case WM_T_I211:
   14188 		wm_nvm_version_invm(sc);
   14189 		have_uid = false;
   14190 		goto printver;
   14191 	case WM_T_I210:
   14192 		if (!wm_nvm_flash_presence_i210(sc)) {
   14193 			wm_nvm_version_invm(sc);
   14194 			have_uid = false;
   14195 			goto printver;
   14196 		}
   14197 		/* FALLTHROUGH */
   14198 	case WM_T_I350:
   14199 	case WM_T_I354:
   14200 		check_version = true;
   14201 		check_optionrom = true;
   14202 		break;
   14203 	default:
   14204 		return;
   14205 	}
   14206 	if (check_version
   14207 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14208 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14209 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14210 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14211 			build = nvm_data & NVM_BUILD_MASK;
   14212 			have_build = true;
   14213 		} else
   14214 			minor = nvm_data & 0x00ff;
   14215 
   14216 		/* Decimal */
   14217 		minor = (minor / 16) * 10 + (minor % 16);
   14218 		sc->sc_nvm_ver_major = major;
   14219 		sc->sc_nvm_ver_minor = minor;
   14220 
   14221 printver:
   14222 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14223 		    sc->sc_nvm_ver_minor);
   14224 		if (have_build) {
   14225 			sc->sc_nvm_ver_build = build;
   14226 			aprint_verbose(".%d", build);
   14227 		}
   14228 	}
   14229 
    14230 	/* Assume the Option ROM area is above NVM_SIZE */
   14231 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14232 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14233 		/* Option ROM Version */
   14234 		if ((off != 0x0000) && (off != 0xffff)) {
   14235 			int rv;
   14236 
   14237 			off += NVM_COMBO_VER_OFF;
   14238 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14239 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14240 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14241 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14242 				/* 16bits */
   14243 				major = uid0 >> 8;
   14244 				build = (uid0 << 8) | (uid1 >> 8);
   14245 				patch = uid1 & 0x00ff;
   14246 				aprint_verbose(", option ROM Version %d.%d.%d",
   14247 				    major, build, patch);
   14248 			}
   14249 		}
   14250 	}
   14251 
   14252 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14253 		aprint_verbose(", Image Unique ID %08x",
   14254 		    ((uint32_t)uid1 << 16) | uid0);
   14255 }
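
          /*
           * Decode walk-through for one entry from the table above: an
           * 82571 with NVM_OFF_VERSION = 0x50a2 gives major = 0x5, a minor
           * field of 0x0a and build = 0x2; the hex-digits-as-decimal
           * conversion maps 0x0a to 10 (and e.g. 0x10 would also map to
           * 10), yielding "version 5.10.2".
           */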
   14256 
   14257 /*
   14258  * wm_nvm_read:
   14259  *
   14260  *	Read data from the serial EEPROM.
   14261  */
   14262 static int
   14263 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14264 {
   14265 	int rv;
   14266 
   14267 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14268 		device_xname(sc->sc_dev), __func__));
   14269 
   14270 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14271 		return -1;
   14272 
   14273 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14274 
   14275 	return rv;
   14276 }
   14277 
   14278 /*
   14279  * Hardware semaphores.
    14280  * Very complex...
   14281  */
   14282 
   14283 static int
   14284 wm_get_null(struct wm_softc *sc)
   14285 {
   14286 
   14287 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14288 		device_xname(sc->sc_dev), __func__));
   14289 	return 0;
   14290 }
   14291 
   14292 static void
   14293 wm_put_null(struct wm_softc *sc)
   14294 {
   14295 
   14296 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14297 		device_xname(sc->sc_dev), __func__));
   14298 	return;
   14299 }
   14300 
   14301 static int
   14302 wm_get_eecd(struct wm_softc *sc)
   14303 {
   14304 	uint32_t reg;
   14305 	int x;
   14306 
   14307 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14308 		device_xname(sc->sc_dev), __func__));
   14309 
   14310 	reg = CSR_READ(sc, WMREG_EECD);
   14311 
   14312 	/* Request EEPROM access. */
   14313 	reg |= EECD_EE_REQ;
   14314 	CSR_WRITE(sc, WMREG_EECD, reg);
   14315 
    14316 	/* ...and wait for it to be granted. */
   14317 	for (x = 0; x < 1000; x++) {
   14318 		reg = CSR_READ(sc, WMREG_EECD);
   14319 		if (reg & EECD_EE_GNT)
   14320 			break;
   14321 		delay(5);
   14322 	}
   14323 	if ((reg & EECD_EE_GNT) == 0) {
   14324 		aprint_error_dev(sc->sc_dev,
   14325 		    "could not acquire EEPROM GNT\n");
   14326 		reg &= ~EECD_EE_REQ;
   14327 		CSR_WRITE(sc, WMREG_EECD, reg);
   14328 		return -1;
   14329 	}
   14330 
   14331 	return 0;
   14332 }
   14333 
   14334 static void
   14335 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14336 {
   14337 
   14338 	*eecd |= EECD_SK;
   14339 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14340 	CSR_WRITE_FLUSH(sc);
   14341 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14342 		delay(1);
   14343 	else
   14344 		delay(50);
   14345 }
   14346 
   14347 static void
   14348 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14349 {
   14350 
   14351 	*eecd &= ~EECD_SK;
   14352 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14353 	CSR_WRITE_FLUSH(sc);
   14354 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14355 		delay(1);
   14356 	else
   14357 		delay(50);
   14358 }
   14359 
   14360 static void
   14361 wm_put_eecd(struct wm_softc *sc)
   14362 {
   14363 	uint32_t reg;
   14364 
   14365 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14366 		device_xname(sc->sc_dev), __func__));
   14367 
   14368 	/* Stop nvm */
   14369 	reg = CSR_READ(sc, WMREG_EECD);
   14370 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14371 		/* Pull CS high */
   14372 		reg |= EECD_CS;
   14373 		wm_nvm_eec_clock_lower(sc, &reg);
   14374 	} else {
   14375 		/* CS on Microwire is active-high */
   14376 		reg &= ~(EECD_CS | EECD_DI);
   14377 		CSR_WRITE(sc, WMREG_EECD, reg);
   14378 		wm_nvm_eec_clock_raise(sc, &reg);
   14379 		wm_nvm_eec_clock_lower(sc, &reg);
   14380 	}
   14381 
   14382 	reg = CSR_READ(sc, WMREG_EECD);
   14383 	reg &= ~EECD_EE_REQ;
   14384 	CSR_WRITE(sc, WMREG_EECD, reg);
   14385 
   14386 	return;
   14387 }
   14388 
   14389 /*
   14390  * Get hardware semaphore.
   14391  * Same as e1000_get_hw_semaphore_generic()
   14392  */
   14393 static int
   14394 wm_get_swsm_semaphore(struct wm_softc *sc)
   14395 {
   14396 	int32_t timeout;
   14397 	uint32_t swsm;
   14398 
   14399 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14400 		device_xname(sc->sc_dev), __func__));
   14401 	KASSERT(sc->sc_nvm_wordsize > 0);
   14402 
   14403 retry:
   14404 	/* Get the SW semaphore. */
   14405 	timeout = sc->sc_nvm_wordsize + 1;
   14406 	while (timeout) {
   14407 		swsm = CSR_READ(sc, WMREG_SWSM);
   14408 
   14409 		if ((swsm & SWSM_SMBI) == 0)
   14410 			break;
   14411 
   14412 		delay(50);
   14413 		timeout--;
   14414 	}
   14415 
   14416 	if (timeout == 0) {
   14417 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14418 			/*
   14419 			 * In rare circumstances, the SW semaphore may already
   14420 			 * be held unintentionally. Clear the semaphore once
   14421 			 * before giving up.
   14422 			 */
   14423 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14424 			wm_put_swsm_semaphore(sc);
   14425 			goto retry;
   14426 		}
   14427 		aprint_error_dev(sc->sc_dev,
   14428 		    "could not acquire SWSM SMBI\n");
   14429 		return 1;
   14430 	}
   14431 
   14432 	/* Get the FW semaphore. */
   14433 	timeout = sc->sc_nvm_wordsize + 1;
   14434 	while (timeout) {
   14435 		swsm = CSR_READ(sc, WMREG_SWSM);
   14436 		swsm |= SWSM_SWESMBI;
   14437 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14438 		/* If we managed to set the bit we got the semaphore. */
   14439 		swsm = CSR_READ(sc, WMREG_SWSM);
   14440 		if (swsm & SWSM_SWESMBI)
   14441 			break;
   14442 
   14443 		delay(50);
   14444 		timeout--;
   14445 	}
   14446 
   14447 	if (timeout == 0) {
   14448 		aprint_error_dev(sc->sc_dev,
   14449 		    "could not acquire SWSM SWESMBI\n");
   14450 		/* Release semaphores */
   14451 		wm_put_swsm_semaphore(sc);
   14452 		return 1;
   14453 	}
   14454 	return 0;
   14455 }
   14456 
   14457 /*
   14458  * Put hardware semaphore.
   14459  * Same as e1000_put_hw_semaphore_generic()
   14460  */
   14461 static void
   14462 wm_put_swsm_semaphore(struct wm_softc *sc)
   14463 {
   14464 	uint32_t swsm;
   14465 
   14466 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14467 		device_xname(sc->sc_dev), __func__));
   14468 
   14469 	swsm = CSR_READ(sc, WMREG_SWSM);
   14470 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14471 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14472 }
   14473 
   14474 /*
   14475  * Get SW/FW semaphore.
   14476  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14477  */
   14478 static int
   14479 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14480 {
   14481 	uint32_t swfw_sync;
   14482 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14483 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14484 	int timeout;
   14485 
   14486 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14487 		device_xname(sc->sc_dev), __func__));
   14488 
   14489 	if (sc->sc_type == WM_T_80003)
   14490 		timeout = 50;
   14491 	else
   14492 		timeout = 200;
   14493 
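	/*
	 * Each resource has a software-ownership bit in the low half of
	 * SW_FW_SYNC and a corresponding firmware-ownership bit in the
	 * high half; the resource is free only when both bits are clear.
	 * SW_FW_SYNC itself may only be touched while holding the SWSM
	 * semaphore.
	 */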
   14494 	while (timeout) {
   14495 		if (wm_get_swsm_semaphore(sc)) {
   14496 			aprint_error_dev(sc->sc_dev,
   14497 			    "%s: failed to get semaphore\n",
   14498 			    __func__);
   14499 			return 1;
   14500 		}
   14501 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14502 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14503 			swfw_sync |= swmask;
   14504 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14505 			wm_put_swsm_semaphore(sc);
   14506 			return 0;
   14507 		}
   14508 		wm_put_swsm_semaphore(sc);
   14509 		delay(5000);
   14510 		timeout--;
   14511 	}
   14512 	device_printf(sc->sc_dev,
   14513 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14514 	    mask, swfw_sync);
   14515 	return 1;
   14516 }
   14517 
   14518 static void
   14519 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14520 {
   14521 	uint32_t swfw_sync;
   14522 
   14523 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14524 		device_xname(sc->sc_dev), __func__));
   14525 
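	/* SW_FW_SYNC may only be updated while holding the SWSM semaphore */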
   14526 	while (wm_get_swsm_semaphore(sc) != 0)
   14527 		continue;
   14528 
   14529 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14530 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14531 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14532 
   14533 	wm_put_swsm_semaphore(sc);
   14534 }
   14535 
   14536 static int
   14537 wm_get_nvm_80003(struct wm_softc *sc)
   14538 {
   14539 	int rv;
   14540 
   14541 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14542 		device_xname(sc->sc_dev), __func__));
   14543 
   14544 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14545 		aprint_error_dev(sc->sc_dev,
   14546 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14547 		return rv;
   14548 	}
   14549 
   14550 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14551 	    && (rv = wm_get_eecd(sc)) != 0) {
   14552 		aprint_error_dev(sc->sc_dev,
   14553 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14554 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14555 		return rv;
   14556 	}
   14557 
   14558 	return 0;
   14559 }
   14560 
   14561 static void
   14562 wm_put_nvm_80003(struct wm_softc *sc)
   14563 {
   14564 
   14565 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14566 		device_xname(sc->sc_dev), __func__));
   14567 
   14568 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14569 		wm_put_eecd(sc);
   14570 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14571 }
   14572 
   14573 static int
   14574 wm_get_nvm_82571(struct wm_softc *sc)
   14575 {
   14576 	int rv;
   14577 
   14578 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14579 		device_xname(sc->sc_dev), __func__));
   14580 
   14581 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14582 		return rv;
   14583 
   14584 	switch (sc->sc_type) {
   14585 	case WM_T_82573:
   14586 		break;
   14587 	default:
   14588 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14589 			rv = wm_get_eecd(sc);
   14590 		break;
   14591 	}
   14592 
   14593 	if (rv != 0) {
   14594 		aprint_error_dev(sc->sc_dev,
   14595 		    "%s: failed to get semaphore\n",
   14596 		    __func__);
   14597 		wm_put_swsm_semaphore(sc);
   14598 	}
   14599 
   14600 	return rv;
   14601 }
   14602 
   14603 static void
   14604 wm_put_nvm_82571(struct wm_softc *sc)
   14605 {
   14606 
   14607 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14608 		device_xname(sc->sc_dev), __func__));
   14609 
   14610 	switch (sc->sc_type) {
   14611 	case WM_T_82573:
   14612 		break;
   14613 	default:
   14614 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14615 			wm_put_eecd(sc);
   14616 		break;
   14617 	}
   14618 
   14619 	wm_put_swsm_semaphore(sc);
   14620 }
   14621 
   14622 static int
   14623 wm_get_phy_82575(struct wm_softc *sc)
   14624 {
   14625 
   14626 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14627 		device_xname(sc->sc_dev), __func__));
   14628 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14629 }
   14630 
   14631 static void
   14632 wm_put_phy_82575(struct wm_softc *sc)
   14633 {
   14634 
   14635 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14636 		device_xname(sc->sc_dev), __func__));
   14637 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14638 }
   14639 
   14640 static int
   14641 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14642 {
   14643 	uint32_t ext_ctrl;
    14644 	int timeout;
   14645 
   14646 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14647 		device_xname(sc->sc_dev), __func__));
   14648 
   14649 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
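	/*
	 * Try to claim MDIO ownership: set the SW ownership bit and read
	 * it back.  While firmware owns the interface the bit reads back
	 * as zero, so retry for up to a second.
	 */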
   14650 	for (timeout = 0; timeout < 200; timeout++) {
   14651 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14652 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14653 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14654 
   14655 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14656 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14657 			return 0;
   14658 		delay(5000);
   14659 	}
   14660 	device_printf(sc->sc_dev,
   14661 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14662 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14663 	return 1;
   14664 }
   14665 
   14666 static void
   14667 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14668 {
   14669 	uint32_t ext_ctrl;
   14670 
   14671 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14672 		device_xname(sc->sc_dev), __func__));
   14673 
   14674 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14675 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14676 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14677 
   14678 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14679 }
   14680 
   14681 static int
   14682 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14683 {
   14684 	uint32_t ext_ctrl;
   14685 	int timeout;
   14686 
   14687 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14688 		device_xname(sc->sc_dev), __func__));
   14689 	mutex_enter(sc->sc_ich_phymtx);
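	/*
	 * Acquire in two phases: wait for the current owner (if any) to
	 * clear the SW ownership bit, then set it ourselves and read it
	 * back to confirm that the hardware accepted the claim.
	 */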
   14690 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14691 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14692 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14693 			break;
   14694 		delay(1000);
   14695 	}
   14696 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14697 		device_printf(sc->sc_dev,
   14698 		    "SW has already locked the resource\n");
   14699 		goto out;
   14700 	}
   14701 
   14702 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14703 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14704 	for (timeout = 0; timeout < 1000; timeout++) {
   14705 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14706 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14707 			break;
   14708 		delay(1000);
   14709 	}
   14710 	if (timeout >= 1000) {
   14711 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14712 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14713 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14714 		goto out;
   14715 	}
   14716 	return 0;
   14717 
   14718 out:
   14719 	mutex_exit(sc->sc_ich_phymtx);
   14720 	return 1;
   14721 }
   14722 
   14723 static void
   14724 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14725 {
   14726 	uint32_t ext_ctrl;
   14727 
   14728 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14729 		device_xname(sc->sc_dev), __func__));
   14730 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14731 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14732 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14733 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14734 	} else {
   14735 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14736 	}
   14737 
   14738 	mutex_exit(sc->sc_ich_phymtx);
   14739 }
   14740 
   14741 static int
   14742 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14743 {
   14744 
   14745 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14746 		device_xname(sc->sc_dev), __func__));
   14747 	mutex_enter(sc->sc_ich_nvmmtx);
   14748 
   14749 	return 0;
   14750 }
   14751 
   14752 static void
   14753 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14754 {
   14755 
   14756 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14757 		device_xname(sc->sc_dev), __func__));
   14758 	mutex_exit(sc->sc_ich_nvmmtx);
   14759 }
   14760 
   14761 static int
   14762 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14763 {
   14764 	int i = 0;
   14765 	uint32_t reg;
   14766 
   14767 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14768 		device_xname(sc->sc_dev), __func__));
   14769 
   14770 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14771 	do {
   14772 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14773 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14774 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14775 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14776 			break;
   14777 		delay(2*1000);
   14778 		i++;
   14779 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14780 
   14781 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14782 		wm_put_hw_semaphore_82573(sc);
   14783 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14784 		    device_xname(sc->sc_dev));
   14785 		return -1;
   14786 	}
   14787 
   14788 	return 0;
   14789 }
   14790 
   14791 static void
   14792 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14793 {
   14794 	uint32_t reg;
   14795 
   14796 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14797 		device_xname(sc->sc_dev), __func__));
   14798 
   14799 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14800 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14801 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14802 }
   14803 
   14804 /*
   14805  * Management mode and power management related subroutines.
   14806  * BMC, AMT, suspend/resume and EEE.
   14807  */
   14808 
   14809 #ifdef WM_WOL
   14810 static int
   14811 wm_check_mng_mode(struct wm_softc *sc)
   14812 {
   14813 	int rv;
   14814 
   14815 	switch (sc->sc_type) {
   14816 	case WM_T_ICH8:
   14817 	case WM_T_ICH9:
   14818 	case WM_T_ICH10:
   14819 	case WM_T_PCH:
   14820 	case WM_T_PCH2:
   14821 	case WM_T_PCH_LPT:
   14822 	case WM_T_PCH_SPT:
   14823 	case WM_T_PCH_CNP:
   14824 		rv = wm_check_mng_mode_ich8lan(sc);
   14825 		break;
   14826 	case WM_T_82574:
   14827 	case WM_T_82583:
   14828 		rv = wm_check_mng_mode_82574(sc);
   14829 		break;
   14830 	case WM_T_82571:
   14831 	case WM_T_82572:
   14832 	case WM_T_82573:
   14833 	case WM_T_80003:
   14834 		rv = wm_check_mng_mode_generic(sc);
   14835 		break;
   14836 	default:
    14837 		/* Nothing to do */
   14838 		rv = 0;
   14839 		break;
   14840 	}
   14841 
   14842 	return rv;
   14843 }
   14844 
   14845 static int
   14846 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14847 {
   14848 	uint32_t fwsm;
   14849 
   14850 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14851 
   14852 	if (((fwsm & FWSM_FW_VALID) != 0)
   14853 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14854 		return 1;
   14855 
   14856 	return 0;
   14857 }
   14858 
   14859 static int
   14860 wm_check_mng_mode_82574(struct wm_softc *sc)
   14861 {
   14862 	uint16_t data;
   14863 
   14864 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14865 
   14866 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14867 		return 1;
   14868 
   14869 	return 0;
   14870 }
   14871 
   14872 static int
   14873 wm_check_mng_mode_generic(struct wm_softc *sc)
   14874 {
   14875 	uint32_t fwsm;
   14876 
   14877 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14878 
   14879 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14880 		return 1;
   14881 
   14882 	return 0;
   14883 }
   14884 #endif /* WM_WOL */
   14885 
   14886 static int
   14887 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14888 {
   14889 	uint32_t manc, fwsm, factps;
   14890 
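	/*
	 * Pass-through requires ASF/iAMT firmware with the TCO receive
	 * path enabled; beyond that, each family signals pass-through
	 * mode differently (FWSM/FACTPS, the NVM MNGM field, or SMBus
	 * enabled without ASF on older parts).
	 */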
   14891 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14892 		return 0;
   14893 
   14894 	manc = CSR_READ(sc, WMREG_MANC);
   14895 
   14896 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14897 		device_xname(sc->sc_dev), manc));
   14898 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14899 		return 0;
   14900 
   14901 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14902 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14903 		factps = CSR_READ(sc, WMREG_FACTPS);
   14904 		if (((factps & FACTPS_MNGCG) == 0)
   14905 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14906 			return 1;
   14907 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14908 		uint16_t data;
   14909 
   14910 		factps = CSR_READ(sc, WMREG_FACTPS);
   14911 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14912 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14913 			device_xname(sc->sc_dev), factps, data));
   14914 		if (((factps & FACTPS_MNGCG) == 0)
   14915 		    && ((data & NVM_CFG2_MNGM_MASK)
   14916 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14917 			return 1;
   14918 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14919 	    && ((manc & MANC_ASF_EN) == 0))
   14920 		return 1;
   14921 
   14922 	return 0;
   14923 }
   14924 
   14925 static bool
   14926 wm_phy_resetisblocked(struct wm_softc *sc)
   14927 {
   14928 	bool blocked = false;
   14929 	uint32_t reg;
   14930 	int i = 0;
   14931 
   14932 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14933 		device_xname(sc->sc_dev), __func__));
   14934 
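	/*
	 * On ICH/PCH parts, firmware indicates that a PHY reset is
	 * blocked via FWSM_RSPCIPHY; poll it for up to ~300ms.  Older
	 * parts expose the equivalent through MANC.
	 */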
   14935 	switch (sc->sc_type) {
   14936 	case WM_T_ICH8:
   14937 	case WM_T_ICH9:
   14938 	case WM_T_ICH10:
   14939 	case WM_T_PCH:
   14940 	case WM_T_PCH2:
   14941 	case WM_T_PCH_LPT:
   14942 	case WM_T_PCH_SPT:
   14943 	case WM_T_PCH_CNP:
   14944 		do {
   14945 			reg = CSR_READ(sc, WMREG_FWSM);
   14946 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14947 				blocked = true;
   14948 				delay(10*1000);
   14949 				continue;
   14950 			}
   14951 			blocked = false;
   14952 		} while (blocked && (i++ < 30));
   14953 		return blocked;
   14955 	case WM_T_82571:
   14956 	case WM_T_82572:
   14957 	case WM_T_82573:
   14958 	case WM_T_82574:
   14959 	case WM_T_82583:
   14960 	case WM_T_80003:
   14961 		reg = CSR_READ(sc, WMREG_MANC);
   14962 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14963 			return true;
   14964 		else
   14965 			return false;
   14967 	default:
   14968 		/* No problem */
   14969 		break;
   14970 	}
   14971 
   14972 	return false;
   14973 }
   14974 
   14975 static void
   14976 wm_get_hw_control(struct wm_softc *sc)
   14977 {
   14978 	uint32_t reg;
   14979 
   14980 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14981 		device_xname(sc->sc_dev), __func__));
   14982 
   14983 	if (sc->sc_type == WM_T_82573) {
   14984 		reg = CSR_READ(sc, WMREG_SWSM);
   14985 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14986 	} else if (sc->sc_type >= WM_T_82571) {
   14987 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14988 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14989 	}
   14990 }
   14991 
   14992 static void
   14993 wm_release_hw_control(struct wm_softc *sc)
   14994 {
   14995 	uint32_t reg;
   14996 
   14997 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14998 		device_xname(sc->sc_dev), __func__));
   14999 
   15000 	if (sc->sc_type == WM_T_82573) {
   15001 		reg = CSR_READ(sc, WMREG_SWSM);
   15002 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15003 	} else if (sc->sc_type >= WM_T_82571) {
   15004 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15005 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15006 	}
   15007 }
   15008 
   15009 static void
   15010 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15011 {
   15012 	uint32_t reg;
   15013 
   15014 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15015 		device_xname(sc->sc_dev), __func__));
   15016 
   15017 	if (sc->sc_type < WM_T_PCH2)
   15018 		return;
   15019 
   15020 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15021 
   15022 	if (gate)
   15023 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15024 	else
   15025 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15026 
   15027 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15028 }
   15029 
   15030 static int
   15031 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15032 {
   15033 	uint32_t fwsm, reg;
   15034 	int rv = 0;
   15035 
   15036 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15037 		device_xname(sc->sc_dev), __func__));
   15038 
   15039 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15040 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15041 
   15042 	/* Disable ULP */
   15043 	wm_ulp_disable(sc);
   15044 
   15045 	/* Acquire PHY semaphore */
   15046 	rv = sc->phy.acquire(sc);
   15047 	if (rv != 0) {
   15048 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15049 			device_xname(sc->sc_dev), __func__));
   15050 		return -1;
   15051 	}
   15052 
   15053 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15054 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15055 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15056 	 */
   15057 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15058 	switch (sc->sc_type) {
   15059 	case WM_T_PCH_LPT:
   15060 	case WM_T_PCH_SPT:
   15061 	case WM_T_PCH_CNP:
   15062 		if (wm_phy_is_accessible_pchlan(sc))
   15063 			break;
   15064 
   15065 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15066 		 * forcing MAC to SMBus mode first.
   15067 		 */
   15068 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15069 		reg |= CTRL_EXT_FORCE_SMBUS;
   15070 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15071 #if 0
   15072 		/* XXX Isn't this required??? */
   15073 		CSR_WRITE_FLUSH(sc);
   15074 #endif
   15075 		/* Wait 50 milliseconds for MAC to finish any retries
   15076 		 * that it might be trying to perform from previous
   15077 		 * attempts to acknowledge any phy read requests.
   15078 		 */
   15079 		delay(50 * 1000);
   15080 		/* FALLTHROUGH */
   15081 	case WM_T_PCH2:
   15082 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15083 			break;
   15084 		/* FALLTHROUGH */
   15085 	case WM_T_PCH:
   15086 		if (sc->sc_type == WM_T_PCH)
   15087 			if ((fwsm & FWSM_FW_VALID) != 0)
   15088 				break;
   15089 
   15090 		if (wm_phy_resetisblocked(sc) == true) {
   15091 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15092 			break;
   15093 		}
   15094 
   15095 		/* Toggle LANPHYPC Value bit */
   15096 		wm_toggle_lanphypc_pch_lpt(sc);
   15097 
   15098 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15099 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15100 				break;
   15101 
   15102 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15103 			 * so ensure that the MAC is also out of SMBus mode
   15104 			 */
   15105 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15106 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15107 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15108 
   15109 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15110 				break;
   15111 			rv = -1;
   15112 		}
   15113 		break;
   15114 	default:
   15115 		break;
   15116 	}
   15117 
   15118 	/* Release semaphore */
   15119 	sc->phy.release(sc);
   15120 
   15121 	if (rv == 0) {
   15122 		/* Check to see if able to reset PHY.  Print error if not */
   15123 		if (wm_phy_resetisblocked(sc)) {
   15124 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15125 			goto out;
   15126 		}
   15127 
   15128 		/* Reset the PHY before any access to it.  Doing so, ensures
   15129 		 * that the PHY is in a known good state before we read/write
   15130 		 * PHY registers.  The generic reset is sufficient here,
   15131 		 * because we haven't determined the PHY type yet.
   15132 		 */
   15133 		if (wm_reset_phy(sc) != 0)
   15134 			goto out;
   15135 
    15136 		/* On a successful reset, possibly need to wait for the PHY
    15137 		 * to quiesce to an accessible state before returning control
    15138 		 * to the calling function.  If the PHY does not quiesce,
    15139 		 * warn about it here, since reset is still blocked and that
    15140 		 * is the condition the PHY is in.
    15141 		 */
   15142 		if (wm_phy_resetisblocked(sc))
    15143 			device_printf(sc->sc_dev, "XXX reset is blocked(5)\n");
   15144 	}
   15145 
   15146 out:
   15147 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15148 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15149 		delay(10*1000);
   15150 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15151 	}
   15152 
    15153 	return rv;
   15154 }
   15155 
   15156 static void
   15157 wm_init_manageability(struct wm_softc *sc)
   15158 {
   15159 
   15160 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15161 		device_xname(sc->sc_dev), __func__));
   15162 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15163 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15164 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15165 
   15166 		/* Disable hardware interception of ARP */
   15167 		manc &= ~MANC_ARP_EN;
   15168 
   15169 		/* Enable receiving management packets to the host */
   15170 		if (sc->sc_type >= WM_T_82571) {
   15171 			manc |= MANC_EN_MNG2HOST;
   15172 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15173 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15174 		}
   15175 
   15176 		CSR_WRITE(sc, WMREG_MANC, manc);
   15177 	}
   15178 }
   15179 
   15180 static void
   15181 wm_release_manageability(struct wm_softc *sc)
   15182 {
   15183 
   15184 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15185 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15186 
   15187 		manc |= MANC_ARP_EN;
   15188 		if (sc->sc_type >= WM_T_82571)
   15189 			manc &= ~MANC_EN_MNG2HOST;
   15190 
   15191 		CSR_WRITE(sc, WMREG_MANC, manc);
   15192 	}
   15193 }
   15194 
   15195 static void
   15196 wm_get_wakeup(struct wm_softc *sc)
   15197 {
   15198 
   15199 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15200 	switch (sc->sc_type) {
   15201 	case WM_T_82573:
   15202 	case WM_T_82583:
   15203 		sc->sc_flags |= WM_F_HAS_AMT;
   15204 		/* FALLTHROUGH */
   15205 	case WM_T_80003:
   15206 	case WM_T_82575:
   15207 	case WM_T_82576:
   15208 	case WM_T_82580:
   15209 	case WM_T_I350:
   15210 	case WM_T_I354:
   15211 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15212 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15213 		/* FALLTHROUGH */
   15214 	case WM_T_82541:
   15215 	case WM_T_82541_2:
   15216 	case WM_T_82547:
   15217 	case WM_T_82547_2:
   15218 	case WM_T_82571:
   15219 	case WM_T_82572:
   15220 	case WM_T_82574:
   15221 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15222 		break;
   15223 	case WM_T_ICH8:
   15224 	case WM_T_ICH9:
   15225 	case WM_T_ICH10:
   15226 	case WM_T_PCH:
   15227 	case WM_T_PCH2:
   15228 	case WM_T_PCH_LPT:
   15229 	case WM_T_PCH_SPT:
   15230 	case WM_T_PCH_CNP:
   15231 		sc->sc_flags |= WM_F_HAS_AMT;
   15232 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15233 		break;
   15234 	default:
   15235 		break;
   15236 	}
   15237 
   15238 	/* 1: HAS_MANAGE */
   15239 	if (wm_enable_mng_pass_thru(sc) != 0)
   15240 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15241 
   15242 	/*
    15243 	 * Note that the WOL flags are set after the EEPROM settings have
    15244 	 * been reset.
   15245 	 */
   15246 }
   15247 
   15248 /*
   15249  * Unconfigure Ultra Low Power mode.
    15250  * Only for PCH_LPT and newer (some I217/I218 variants are excluded below).
   15251  */
   15252 static int
   15253 wm_ulp_disable(struct wm_softc *sc)
   15254 {
   15255 	uint32_t reg;
   15256 	uint16_t phyreg;
   15257 	int i = 0, rv = 0;
   15258 
   15259 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15260 		device_xname(sc->sc_dev), __func__));
   15261 	/* Exclude old devices */
   15262 	if ((sc->sc_type < WM_T_PCH_LPT)
   15263 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15264 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15265 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15266 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15267 		return 0;
   15268 
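	/*
	 * If management engine (ME) firmware is present, ask it to take
	 * the PHY out of ULP via the H2ME register; otherwise the host
	 * must unconfigure ULP through the PHY registers itself.
	 */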
   15269 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15270 		/* Request ME un-configure ULP mode in the PHY */
   15271 		reg = CSR_READ(sc, WMREG_H2ME);
   15272 		reg &= ~H2ME_ULP;
   15273 		reg |= H2ME_ENFORCE_SETTINGS;
   15274 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15275 
   15276 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15277 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15278 			if (i++ == 30) {
   15279 				device_printf(sc->sc_dev, "%s timed out\n",
   15280 				    __func__);
   15281 				return -1;
   15282 			}
   15283 			delay(10 * 1000);
   15284 		}
   15285 		reg = CSR_READ(sc, WMREG_H2ME);
   15286 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15287 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15288 
   15289 		return 0;
   15290 	}
   15291 
   15292 	/* Acquire semaphore */
   15293 	rv = sc->phy.acquire(sc);
   15294 	if (rv != 0) {
   15295 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15296 			device_xname(sc->sc_dev), __func__));
   15297 		return -1;
   15298 	}
   15299 
   15300 	/* Toggle LANPHYPC */
   15301 	wm_toggle_lanphypc_pch_lpt(sc);
   15302 
   15303 	/* Unforce SMBus mode in PHY */
   15304 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15305 	if (rv != 0) {
   15306 		uint32_t reg2;
   15307 
   15308 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15309 			__func__);
   15310 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15311 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15312 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15313 		delay(50 * 1000);
   15314 
   15315 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15316 		    &phyreg);
   15317 		if (rv != 0)
   15318 			goto release;
   15319 	}
   15320 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15321 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15322 
   15323 	/* Unforce SMBus mode in MAC */
   15324 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15325 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15326 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15327 
   15328 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15329 	if (rv != 0)
   15330 		goto release;
   15331 	phyreg |= HV_PM_CTRL_K1_ENA;
   15332 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15333 
   15334 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15335 		&phyreg);
   15336 	if (rv != 0)
   15337 		goto release;
   15338 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15339 	    | I218_ULP_CONFIG1_STICKY_ULP
   15340 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15341 	    | I218_ULP_CONFIG1_WOL_HOST
   15342 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15343 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15344 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15345 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15346 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15347 	phyreg |= I218_ULP_CONFIG1_START;
   15348 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15349 
   15350 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15351 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15352 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15353 
   15354 release:
   15355 	/* Release semaphore */
   15356 	sc->phy.release(sc);
   15357 	wm_gmii_reset(sc);
   15358 	delay(50 * 1000);
   15359 
   15360 	return rv;
   15361 }
   15362 
   15363 /* WOL in the newer chipset interfaces (pchlan) */
   15364 static int
   15365 wm_enable_phy_wakeup(struct wm_softc *sc)
   15366 {
   15367 	device_t dev = sc->sc_dev;
   15368 	uint32_t mreg, moff;
   15369 	uint16_t wuce, wuc, wufc, preg;
   15370 	int i, rv;
   15371 
   15372 	KASSERT(sc->sc_type >= WM_T_PCH);
   15373 
   15374 	/* Copy MAC RARs to PHY RARs */
   15375 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15376 
   15377 	/* Activate PHY wakeup */
   15378 	rv = sc->phy.acquire(sc);
   15379 	if (rv != 0) {
   15380 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15381 		    __func__);
   15382 		return rv;
   15383 	}
   15384 
   15385 	/*
   15386 	 * Enable access to PHY wakeup registers.
   15387 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15388 	 */
   15389 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15390 	if (rv != 0) {
   15391 		device_printf(dev,
   15392 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15393 		goto release;
   15394 	}
   15395 
   15396 	/* Copy MAC MTA to PHY MTA */
   15397 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15398 		uint16_t lo, hi;
   15399 
   15400 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15401 		lo = (uint16_t)(mreg & 0xffff);
   15402 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15403 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15404 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15405 	}
   15406 
   15407 	/* Configure PHY Rx Control register */
   15408 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15409 	mreg = CSR_READ(sc, WMREG_RCTL);
   15410 	if (mreg & RCTL_UPE)
   15411 		preg |= BM_RCTL_UPE;
   15412 	if (mreg & RCTL_MPE)
   15413 		preg |= BM_RCTL_MPE;
   15414 	preg &= ~(BM_RCTL_MO_MASK);
   15415 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15416 	if (moff != 0)
   15417 		preg |= moff << BM_RCTL_MO_SHIFT;
   15418 	if (mreg & RCTL_BAM)
   15419 		preg |= BM_RCTL_BAM;
   15420 	if (mreg & RCTL_PMCF)
   15421 		preg |= BM_RCTL_PMCF;
   15422 	mreg = CSR_READ(sc, WMREG_CTRL);
   15423 	if (mreg & CTRL_RFCE)
   15424 		preg |= BM_RCTL_RFCE;
   15425 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15426 
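	/* Wake on magic packet only, with APM wakeup and PME assertion */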
   15427 	wuc = WUC_APME | WUC_PME_EN;
   15428 	wufc = WUFC_MAG;
   15429 	/* Enable PHY wakeup in MAC register */
   15430 	CSR_WRITE(sc, WMREG_WUC,
   15431 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15432 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15433 
   15434 	/* Configure and enable PHY wakeup in PHY registers */
   15435 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15436 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15437 
   15438 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15439 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15440 
   15441 release:
   15442 	sc->phy.release(sc);
   15443 
    15444 	return rv;
   15445 }
   15446 
   15447 /* Power down workaround on D3 */
   15448 static void
   15449 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15450 {
   15451 	uint32_t reg;
   15452 	uint16_t phyreg;
   15453 	int i;
   15454 
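	/*
	 * Try to put the PHY's voltage regulator into shutdown; if the
	 * setting does not stick, reset the PHY and try once more.
	 */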
   15455 	for (i = 0; i < 2; i++) {
   15456 		/* Disable link */
   15457 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15458 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15459 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15460 
   15461 		/*
   15462 		 * Call gig speed drop workaround on Gig disable before
   15463 		 * accessing any PHY registers
   15464 		 */
   15465 		if (sc->sc_type == WM_T_ICH8)
   15466 			wm_gig_downshift_workaround_ich8lan(sc);
   15467 
   15468 		/* Write VR power-down enable */
   15469 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15470 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15471 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15472 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15473 
   15474 		/* Read it back and test */
   15475 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15476 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15477 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15478 			break;
   15479 
   15480 		/* Issue PHY reset and repeat at most one more time */
   15481 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15482 	}
   15483 }
   15484 
   15485 /*
   15486  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15487  *  @sc: pointer to the HW structure
   15488  *
   15489  *  During S0 to Sx transition, it is possible the link remains at gig
   15490  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15491  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15492  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15493  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15494  *  needs to be written.
   15495  *  Parts that support (and are linked to a partner which support) EEE in
   15496  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15497  *  than 10Mbps w/o EEE.
   15498  */
   15499 static void
   15500 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15501 {
   15502 	device_t dev = sc->sc_dev;
   15503 	struct ethercom *ec = &sc->sc_ethercom;
   15504 	uint32_t phy_ctrl;
   15505 	int rv;
   15506 
   15507 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15508 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15509 
   15510 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15511 
   15512 	if (sc->sc_phytype == WMPHY_I217) {
   15513 		uint16_t devid = sc->sc_pcidevid;
   15514 
   15515 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15516 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15517 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15518 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15519 		    (sc->sc_type >= WM_T_PCH_SPT))
   15520 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15521 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15522 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15523 
   15524 		if (sc->phy.acquire(sc) != 0)
   15525 			goto out;
   15526 
   15527 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15528 			uint16_t eee_advert;
   15529 
   15530 			rv = wm_read_emi_reg_locked(dev,
   15531 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15532 			if (rv)
   15533 				goto release;
   15534 
   15535 			/*
   15536 			 * Disable LPLU if both link partners support 100BaseT
   15537 			 * EEE and 100Full is advertised on both ends of the
   15538 			 * link, and enable Auto Enable LPI since there will
   15539 			 * be no driver to enable LPI while in Sx.
   15540 			 */
   15541 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15542 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15543 				uint16_t anar, phy_reg;
   15544 
   15545 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15546 				    &anar);
   15547 				if (anar & ANAR_TX_FD) {
   15548 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15549 					    PHY_CTRL_NOND0A_LPLU);
   15550 
   15551 					/* Set Auto Enable LPI after link up */
   15552 					sc->phy.readreg_locked(dev, 2,
   15553 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15554 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15555 					sc->phy.writereg_locked(dev, 2,
   15556 					    I217_LPI_GPIO_CTRL, phy_reg);
   15557 				}
   15558 			}
   15559 		}
   15560 
   15561 		/*
   15562 		 * For i217 Intel Rapid Start Technology support,
   15563 		 * when the system is going into Sx and no manageability engine
   15564 		 * is present, the driver must configure proxy to reset only on
   15565 		 * power good.	LPI (Low Power Idle) state must also reset only
   15566 		 * on power good, as well as the MTA (Multicast table array).
   15567 		 * The SMBus release must also be disabled on LCD reset.
   15568 		 */
   15569 
   15570 		/*
   15571 		 * Enable MTA to reset for Intel Rapid Start Technology
   15572 		 * Support
   15573 		 */
   15574 
   15575 release:
   15576 		sc->phy.release(sc);
   15577 	}
   15578 out:
   15579 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15580 
   15581 	if (sc->sc_type == WM_T_ICH8)
   15582 		wm_gig_downshift_workaround_ich8lan(sc);
   15583 
   15584 	if (sc->sc_type >= WM_T_PCH) {
   15585 		wm_oem_bits_config_ich8lan(sc, false);
   15586 
   15587 		/* Reset PHY to activate OEM bits on 82577/8 */
   15588 		if (sc->sc_type == WM_T_PCH)
   15589 			wm_reset_phy(sc);
   15590 
   15591 		if (sc->phy.acquire(sc) != 0)
   15592 			return;
   15593 		wm_write_smbus_addr(sc);
   15594 		sc->phy.release(sc);
   15595 	}
   15596 }
   15597 
   15598 /*
   15599  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15600  *  @sc: pointer to the HW structure
   15601  *
   15602  *  During Sx to S0 transitions on non-managed devices or managed devices
   15603  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15604  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15605  *  the PHY.
   15606  *  On i217, setup Intel Rapid Start Technology.
   15607  */
   15608 static int
   15609 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15610 {
   15611 	device_t dev = sc->sc_dev;
   15612 	int rv;
   15613 
   15614 	if (sc->sc_type < WM_T_PCH2)
   15615 		return 0;
   15616 
   15617 	rv = wm_init_phy_workarounds_pchlan(sc);
   15618 	if (rv != 0)
   15619 		return -1;
   15620 
   15621 	/* For i217 Intel Rapid Start Technology support when the system
   15622 	 * is transitioning from Sx and no manageability engine is present
   15623 	 * configure SMBus to restore on reset, disable proxy, and enable
   15624 	 * the reset on MTA (Multicast table array).
   15625 	 */
   15626 	if (sc->sc_phytype == WMPHY_I217) {
   15627 		uint16_t phy_reg;
   15628 
   15629 		if (sc->phy.acquire(sc) != 0)
   15630 			return -1;
   15631 
   15632 		/* Clear Auto Enable LPI after link up */
   15633 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15634 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15635 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15636 
   15637 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15638 			/* Restore clear on SMB if no manageability engine
   15639 			 * is present
   15640 			 */
   15641 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15642 			    &phy_reg);
   15643 			if (rv != 0)
   15644 				goto release;
   15645 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15646 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15647 
   15648 			/* Disable Proxy */
   15649 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15650 		}
   15651 		/* Enable reset on MTA */
    15652 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15653 		if (rv != 0)
   15654 			goto release;
   15655 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15656 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15657 
   15658 release:
   15659 		sc->phy.release(sc);
   15660 		return rv;
   15661 	}
   15662 
   15663 	return 0;
   15664 }
   15665 
   15666 static void
   15667 wm_enable_wakeup(struct wm_softc *sc)
   15668 {
   15669 	uint32_t reg, pmreg;
   15670 	pcireg_t pmode;
   15671 	int rv = 0;
   15672 
   15673 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15674 		device_xname(sc->sc_dev), __func__));
   15675 
   15676 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15677 	    &pmreg, NULL) == 0)
   15678 		return;
   15679 
   15680 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15681 		goto pme;
   15682 
   15683 	/* Advertise the wakeup capability */
   15684 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15685 	    | CTRL_SWDPIN(3));
   15686 
   15687 	/* Keep the laser running on fiber adapters */
   15688 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15689 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15690 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15691 		reg |= CTRL_EXT_SWDPIN(3);
   15692 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15693 	}
   15694 
   15695 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15696 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15697 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15698 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15699 		wm_suspend_workarounds_ich8lan(sc);
   15700 
   15701 #if 0	/* For the multicast packet */
   15702 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15703 	reg |= WUFC_MC;
   15704 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15705 #endif
   15706 
   15707 	if (sc->sc_type >= WM_T_PCH) {
   15708 		rv = wm_enable_phy_wakeup(sc);
   15709 		if (rv != 0)
   15710 			goto pme;
   15711 	} else {
   15712 		/* Enable wakeup by the MAC */
   15713 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15714 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15715 	}
   15716 
   15717 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15718 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15719 		|| (sc->sc_type == WM_T_PCH2))
   15720 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15721 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15722 
   15723 pme:
   15724 	/* Request PME */
   15725 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15726 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15727 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15728 		/* For WOL */
   15729 		pmode |= PCI_PMCSR_PME_EN;
   15730 	} else {
   15731 		/* Disable WOL */
   15732 		pmode &= ~PCI_PMCSR_PME_EN;
   15733 	}
   15734 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15735 }
   15736 
   15737 /* Disable ASPM L0s and/or L1 for workaround */
   15738 static void
   15739 wm_disable_aspm(struct wm_softc *sc)
   15740 {
   15741 	pcireg_t reg, mask = 0;
    15742 	const char *str = "";
   15743 
   15744 	/*
    15745 	 * Only for PCIe devices which have the PCIe capability in their
    15746 	 * PCI config space.
   15747 	 */
   15748 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15749 		return;
   15750 
   15751 	switch (sc->sc_type) {
   15752 	case WM_T_82571:
   15753 	case WM_T_82572:
   15754 		/*
   15755 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15756 		 * State Power management L1 State (ASPM L1).
   15757 		 */
   15758 		mask = PCIE_LCSR_ASPM_L1;
   15759 		str = "L1 is";
   15760 		break;
   15761 	case WM_T_82573:
   15762 	case WM_T_82574:
   15763 	case WM_T_82583:
   15764 		/*
   15765 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15766 		 *
    15767 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15768 		 * some chipsets.  The 82574 and 82583 documents say that
    15769 		 * disabling L0s on those specific chipsets is sufficient,
    15770 		 * but we follow what the Intel em driver does.
   15771 		 *
   15772 		 * References:
   15773 		 * Errata 8 of the Specification Update of i82573.
   15774 		 * Errata 20 of the Specification Update of i82574.
   15775 		 * Errata 9 of the Specification Update of i82583.
   15776 		 */
   15777 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15778 		str = "L0s and L1 are";
   15779 		break;
   15780 	default:
   15781 		return;
   15782 	}
   15783 
   15784 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15785 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15786 	reg &= ~mask;
   15787 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15788 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15789 
   15790 	/* Print only in wm_attach() */
   15791 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15792 		aprint_verbose_dev(sc->sc_dev,
   15793 		    "ASPM %s disabled to workaround the errata.\n", str);
   15794 }
   15795 
   15796 /* LPLU */
   15797 
   15798 static void
   15799 wm_lplu_d0_disable(struct wm_softc *sc)
   15800 {
   15801 	struct mii_data *mii = &sc->sc_mii;
   15802 	uint32_t reg;
   15803 	uint16_t phyval;
   15804 
   15805 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15806 		device_xname(sc->sc_dev), __func__));
   15807 
   15808 	if (sc->sc_phytype == WMPHY_IFE)
   15809 		return;
   15810 
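	/*
	 * D0 LPLU is cleared in whichever register each family exposes
	 * it: a PHY power-management register, the PHPM or PHY_CTRL MAC
	 * registers, or the HV OEM bits on PCH and newer parts.
	 */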
   15811 	switch (sc->sc_type) {
   15812 	case WM_T_82571:
   15813 	case WM_T_82572:
   15814 	case WM_T_82573:
   15815 	case WM_T_82575:
   15816 	case WM_T_82576:
   15817 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15818 		phyval &= ~PMR_D0_LPLU;
   15819 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15820 		break;
   15821 	case WM_T_82580:
   15822 	case WM_T_I350:
   15823 	case WM_T_I210:
   15824 	case WM_T_I211:
   15825 		reg = CSR_READ(sc, WMREG_PHPM);
   15826 		reg &= ~PHPM_D0A_LPLU;
   15827 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15828 		break;
   15829 	case WM_T_82574:
   15830 	case WM_T_82583:
   15831 	case WM_T_ICH8:
   15832 	case WM_T_ICH9:
   15833 	case WM_T_ICH10:
   15834 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15835 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15836 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15837 		CSR_WRITE_FLUSH(sc);
   15838 		break;
   15839 	case WM_T_PCH:
   15840 	case WM_T_PCH2:
   15841 	case WM_T_PCH_LPT:
   15842 	case WM_T_PCH_SPT:
   15843 	case WM_T_PCH_CNP:
   15844 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15845 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15846 		if (wm_phy_resetisblocked(sc) == false)
   15847 			phyval |= HV_OEM_BITS_ANEGNOW;
   15848 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15849 		break;
   15850 	default:
   15851 		break;
   15852 	}
   15853 }
   15854 
   15855 /* EEE */
   15856 
   15857 static int
   15858 wm_set_eee_i350(struct wm_softc *sc)
   15859 {
   15860 	struct ethercom *ec = &sc->sc_ethercom;
   15861 	uint32_t ipcnfg, eeer;
   15862 	uint32_t ipcnfg_mask
   15863 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15864 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15865 
   15866 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15867 
   15868 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15869 	eeer = CSR_READ(sc, WMREG_EEER);
   15870 
   15871 	/* Enable or disable per user setting */
   15872 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15873 		ipcnfg |= ipcnfg_mask;
   15874 		eeer |= eeer_mask;
   15875 	} else {
   15876 		ipcnfg &= ~ipcnfg_mask;
   15877 		eeer &= ~eeer_mask;
   15878 	}
   15879 
   15880 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15881 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15882 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15883 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15884 
   15885 	return 0;
   15886 }
   15887 
   15888 static int
   15889 wm_set_eee_pchlan(struct wm_softc *sc)
   15890 {
   15891 	device_t dev = sc->sc_dev;
   15892 	struct ethercom *ec = &sc->sc_ethercom;
   15893 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15894 	int rv = 0;
   15895 
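	/*
	 * The EEE capability registers live in the EMI register space;
	 * their addresses differ between the 82579 and I217 PHYs.
	 */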
   15896 	switch (sc->sc_phytype) {
   15897 	case WMPHY_82579:
   15898 		lpa = I82579_EEE_LP_ABILITY;
   15899 		pcs_status = I82579_EEE_PCS_STATUS;
   15900 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15901 		break;
   15902 	case WMPHY_I217:
   15903 		lpa = I217_EEE_LP_ABILITY;
   15904 		pcs_status = I217_EEE_PCS_STATUS;
   15905 		adv_addr = I217_EEE_ADVERTISEMENT;
   15906 		break;
   15907 	default:
   15908 		return 0;
   15909 	}
   15910 
   15911 	if (sc->phy.acquire(sc)) {
   15912 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15913 		return 0;
   15914 	}
   15915 
   15916 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15917 	if (rv != 0)
   15918 		goto release;
   15919 
   15920 	/* Clear bits that enable EEE in various speeds */
   15921 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15922 
   15923 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15924 		/* Save off link partner's EEE ability */
   15925 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15926 		if (rv != 0)
   15927 			goto release;
   15928 
   15929 		/* Read EEE advertisement */
   15930 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15931 			goto release;
   15932 
   15933 		/*
   15934 		 * Enable EEE only for speeds in which the link partner is
   15935 		 * EEE capable and for which we advertise EEE.
   15936 		 */
   15937 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15938 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15939 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15940 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15941 			if ((data & ANLPAR_TX_FD) != 0)
   15942 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15943 			else {
   15944 				/*
   15945 				 * EEE is not supported in 100Half, so ignore
   15946 				 * partner's EEE in 100 ability if full-duplex
   15947 				 * is not advertised.
   15948 				 */
   15949 				sc->eee_lp_ability
   15950 				    &= ~AN_EEEADVERT_100_TX;
   15951 			}
   15952 		}
   15953 	}
   15954 
   15955 	if (sc->sc_phytype == WMPHY_82579) {
   15956 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15957 		if (rv != 0)
   15958 			goto release;
   15959 
   15960 		data &= ~I82579_LPI_PLL_SHUT_100;
   15961 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15962 	}
   15963 
   15964 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15965 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15966 		goto release;
   15967 
   15968 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15969 release:
   15970 	sc->phy.release(sc);
   15971 
   15972 	return rv;
   15973 }
   15974 
   15975 static int
   15976 wm_set_eee(struct wm_softc *sc)
   15977 {
   15978 	struct ethercom *ec = &sc->sc_ethercom;
   15979 
   15980 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15981 		return 0;
   15982 
   15983 	if (sc->sc_type == WM_T_I354) {
   15984 		/* I354 uses an external PHY */
   15985 		return 0; /* not yet */
   15986 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15987 		return wm_set_eee_i350(sc);
   15988 	else if (sc->sc_type >= WM_T_PCH2)
   15989 		return wm_set_eee_pchlan(sc);
   15990 
   15991 	return 0;
   15992 }
   15993 
   15994 /*
   15995  * Workarounds (mainly PHY related).
   15996  * Basically, PHY's workarounds are in the PHY drivers.
   15997  */
   15998 
   15999 /* Work-around for 82566 Kumeran PCS lock loss */
   16000 static int
   16001 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16002 {
   16003 	struct mii_data *mii = &sc->sc_mii;
   16004 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16005 	int i, reg, rv;
   16006 	uint16_t phyreg;
   16007 
   16008 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16009 		device_xname(sc->sc_dev), __func__));
   16010 
   16011 	/* If the link is not up, do nothing */
   16012 	if ((status & STATUS_LU) == 0)
   16013 		return 0;
   16014 
   16015 	/* Nothing to do if the link is other than 1Gbps */
   16016 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16017 		return 0;
   16018 
   16019 	for (i = 0; i < 10; i++) {
   16020 		/* read twice */
   16021 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16022 		if (rv != 0)
   16023 			return rv;
   16024 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16025 		if (rv != 0)
   16026 			return rv;
   16027 
   16028 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16029 			goto out;	/* GOOD! */
   16030 
   16031 		/* Reset the PHY */
   16032 		wm_reset_phy(sc);
   16033 		delay(5*1000);
   16034 	}
   16035 
   16036 	/* Disable GigE link negotiation */
   16037 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16038 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16039 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16040 
   16041 	/*
   16042 	 * Call gig speed drop workaround on Gig disable before accessing
   16043 	 * any PHY registers.
   16044 	 */
   16045 	wm_gig_downshift_workaround_ich8lan(sc);
   16046 
   16047 out:
   16048 	return 0;
   16049 }
   16050 
   16051 /*
   16052  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16053  *  @sc: pointer to the HW structure
   16054  *
   16055  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   16056  *  LPLU, Gig disable, MDIC PHY reset):
   16057  *    1) Set Kumeran Near-end loopback
   16058  *    2) Clear Kumeran Near-end loopback
   16059  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16060  */
   16061 static void
   16062 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16063 {
   16064 	uint16_t kmreg;
   16065 
   16066 	/* Only for igp3 */
   16067 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16068 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16069 			return;
   16070 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16071 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16072 			return;
   16073 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16074 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16075 	}
   16076 }
   16077 
   16078 /*
   16079  * Workaround for pch's PHYs
   16080  * XXX should be moved to new PHY driver?
   16081  */
   16082 static int
   16083 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16084 {
   16085 	device_t dev = sc->sc_dev;
   16086 	struct mii_data *mii = &sc->sc_mii;
   16087 	struct mii_softc *child;
   16088 	uint16_t phy_data, phyrev = 0;
   16089 	int phytype = sc->sc_phytype;
   16090 	int rv;
   16091 
   16092 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16093 		device_xname(dev), __func__));
   16094 	KASSERT(sc->sc_type == WM_T_PCH);
   16095 
   16096 	/* Set MDIO slow mode before any other MDIO access */
   16097 	if (phytype == WMPHY_82577)
   16098 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16099 			return rv;
   16100 
   16101 	child = LIST_FIRST(&mii->mii_phys);
   16102 	if (child != NULL)
   16103 		phyrev = child->mii_mpd_rev;
   16104 
    16105 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16106 	if ((child != NULL) &&
   16107 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16108 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16109 		/* Disable generation of early preamble (0x4431) */
   16110 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16111 		    &phy_data);
   16112 		if (rv != 0)
   16113 			return rv;
   16114 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16115 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16116 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16117 		    phy_data);
   16118 		if (rv != 0)
   16119 			return rv;
   16120 
   16121 		/* Preamble tuning for SSC */
   16122 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16123 		if (rv != 0)
   16124 			return rv;
   16125 	}
   16126 
   16127 	/* 82578 */
   16128 	if (phytype == WMPHY_82578) {
   16129 		/*
   16130 		 * Return registers to default by doing a soft reset then
   16131 		 * writing 0x3140 to the control register
   16132 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16133 		 */
   16134 		if ((child != NULL) && (phyrev < 2)) {
   16135 			PHY_RESET(child);
   16136 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16137 			if (rv != 0)
   16138 				return rv;
   16139 		}
   16140 	}
   16141 
   16142 	/* Select page 0 */
   16143 	if ((rv = sc->phy.acquire(sc)) != 0)
   16144 		return rv;
   16145 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16146 	sc->phy.release(sc);
   16147 	if (rv != 0)
   16148 		return rv;
   16149 
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled if the link is at 1Gbps.
	 */
   16154 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16155 		return rv;
   16156 
   16157 	/* Workaround for link disconnects on a busy hub in half duplex */
   16158 	rv = sc->phy.acquire(sc);
   16159 	if (rv)
   16160 		return rv;
   16161 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16162 	if (rv)
   16163 		goto release;
   16164 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16165 	    phy_data & 0x00ff);
   16166 	if (rv)
   16167 		goto release;
   16168 
   16169 	/* Set MSE higher to enable link to stay up when noise is high */
   16170 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16171 release:
   16172 	sc->phy.release(sc);
   16173 
   16174 	return rv;
   16175 }
   16176 
   16177 /*
   16178  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16179  *  @sc:   pointer to the HW structure
   16180  */
   16181 static void
   16182 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16183 {
   16184 
   16185 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16186 		device_xname(sc->sc_dev), __func__));
   16187 
   16188 	if (sc->phy.acquire(sc) != 0)
   16189 		return;
   16190 
   16191 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16192 
   16193 	sc->phy.release(sc);
   16194 }
   16195 
   16196 static void
   16197 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16198 {
   16199 	device_t dev = sc->sc_dev;
   16200 	uint32_t mac_reg;
   16201 	uint16_t i, wuce;
   16202 	int count;
   16203 
   16204 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16205 		device_xname(dev), __func__));
   16206 
   16207 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16208 		return;
   16209 
   16210 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16211 	count = wm_rar_count(sc);
   16212 	for (i = 0; i < count; i++) {
   16213 		uint16_t lo, hi;
   16214 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16215 		lo = (uint16_t)(mac_reg & 0xffff);
   16216 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16217 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16218 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16219 
   16220 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16221 		lo = (uint16_t)(mac_reg & 0xffff);
   16222 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16223 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16224 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16225 	}
   16226 
   16227 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16228 }
   16229 
   16230 /*
   16231  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @sc: pointer to the HW structure
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16234  */
   16235 static int
   16236 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16237 {
   16238 	device_t dev = sc->sc_dev;
   16239 	int rar_count;
   16240 	int rv;
   16241 	uint32_t mac_reg;
   16242 	uint16_t dft_ctrl, data;
   16243 	uint16_t i;
   16244 
   16245 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16246 		device_xname(dev), __func__));
   16247 
   16248 	if (sc->sc_type < WM_T_PCH2)
   16249 		return 0;
   16250 
   16251 	/* Acquire PHY semaphore */
   16252 	rv = sc->phy.acquire(sc);
   16253 	if (rv != 0)
   16254 		return rv;
   16255 
   16256 	/* Disable Rx path while enabling/disabling workaround */
   16257 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16258 	if (rv != 0)
   16259 		goto out;
   16260 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16261 	    dft_ctrl | (1 << 14));
   16262 	if (rv != 0)
   16263 		goto out;
   16264 
   16265 	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC.
		 */
   16269 		rar_count = wm_rar_count(sc);
   16270 		for (i = 0; i < rar_count; i++) {
   16271 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16272 			uint32_t addr_high, addr_low;
   16273 
   16274 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16275 			if (!(addr_high & RAL_AV))
   16276 				continue;
   16277 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16278 			mac_addr[0] = (addr_low & 0xFF);
   16279 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16280 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16281 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16282 			mac_addr[4] = (addr_high & 0xFF);
   16283 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16284 
   16285 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16286 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16287 		}
   16288 
   16289 		/* Write Rx addresses to the PHY */
   16290 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16291 	}
   16292 
   16293 	/*
   16294 	 * If enable ==
   16295 	 *	true: Enable jumbo frame workaround in the MAC.
   16296 	 *	false: Write MAC register values back to h/w defaults.
   16297 	 */
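	/*
	 * The FFLT_DBG, Kumeran and PHY register values used below are
	 * magic numbers taken from Intel's reference drivers (Linux e1000e
	 * and FreeBSD em); most are not documented in public datasheets.
	 */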
   16298 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16299 	if (enable) {
   16300 		mac_reg &= ~(1 << 14);
   16301 		mac_reg |= (7 << 15);
   16302 	} else
   16303 		mac_reg &= ~(0xf << 14);
   16304 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16305 
   16306 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16307 	if (enable) {
   16308 		mac_reg |= RCTL_SECRC;
   16309 		sc->sc_rctl |= RCTL_SECRC;
   16310 		sc->sc_flags |= WM_F_CRC_STRIP;
   16311 	} else {
   16312 		mac_reg &= ~RCTL_SECRC;
   16313 		sc->sc_rctl &= ~RCTL_SECRC;
   16314 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16315 	}
   16316 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16317 
   16318 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16319 	if (rv != 0)
   16320 		goto out;
   16321 	if (enable)
   16322 		data |= 1 << 0;
   16323 	else
   16324 		data &= ~(1 << 0);
   16325 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16326 	if (rv != 0)
   16327 		goto out;
   16328 
   16329 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16330 	if (rv != 0)
   16331 		goto out;
	/*
	 * XXX FreeBSD and Linux both set the same value here, in the enable
	 * case and in the disable case. Is that correct?
	 */
   16336 	data &= ~(0xf << 8);
   16337 	data |= (0xb << 8);
   16338 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16339 	if (rv != 0)
   16340 		goto out;
   16341 
   16342 	/*
   16343 	 * If enable ==
   16344 	 *	true: Enable jumbo frame workaround in the PHY.
   16345 	 *	false: Write PHY register values back to h/w defaults.
   16346 	 */
   16347 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16348 	if (rv != 0)
   16349 		goto out;
   16350 	data &= ~(0x7F << 5);
   16351 	if (enable)
   16352 		data |= (0x37 << 5);
   16353 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16354 	if (rv != 0)
   16355 		goto out;
   16356 
   16357 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16358 	if (rv != 0)
   16359 		goto out;
   16360 	if (enable)
   16361 		data &= ~(1 << 13);
   16362 	else
   16363 		data |= (1 << 13);
   16364 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16365 	if (rv != 0)
   16366 		goto out;
   16367 
   16368 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16369 	if (rv != 0)
   16370 		goto out;
   16371 	data &= ~(0x3FF << 2);
   16372 	if (enable)
   16373 		data |= (I82579_TX_PTR_GAP << 2);
   16374 	else
   16375 		data |= (0x8 << 2);
   16376 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16377 	if (rv != 0)
   16378 		goto out;
   16379 
   16380 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16381 	    enable ? 0xf100 : 0x7e00);
   16382 	if (rv != 0)
   16383 		goto out;
   16384 
   16385 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16386 	if (rv != 0)
   16387 		goto out;
   16388 	if (enable)
   16389 		data |= 1 << 10;
   16390 	else
   16391 		data &= ~(1 << 10);
   16392 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16393 	if (rv != 0)
   16394 		goto out;
   16395 
   16396 	/* Re-enable Rx path after enabling/disabling workaround */
   16397 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16398 	    dft_ctrl & ~(1 << 14));
   16399 
   16400 out:
   16401 	sc->phy.release(sc);
   16402 
   16403 	return rv;
   16404 }
   16405 
   16406 /*
   16407  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16408  *  done after every PHY reset.
   16409  */
   16410 static int
   16411 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16412 {
   16413 	device_t dev = sc->sc_dev;
   16414 	int rv;
   16415 
   16416 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16417 		device_xname(dev), __func__));
   16418 	KASSERT(sc->sc_type == WM_T_PCH2);
   16419 
   16420 	/* Set MDIO slow mode before any other MDIO access */
   16421 	rv = wm_set_mdio_slow_mode_hv(sc);
   16422 	if (rv != 0)
   16423 		return rv;
   16424 
   16425 	rv = sc->phy.acquire(sc);
   16426 	if (rv != 0)
   16427 		return rv;
   16428 	/* Set MSE higher to enable link to stay up when noise is high */
   16429 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16430 	if (rv != 0)
   16431 		goto release;
	/* Drop link after the MSE threshold has been reached 5 times */
   16433 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16434 release:
   16435 	sc->phy.release(sc);
   16436 
   16437 	return rv;
   16438 }
   16439 
   16440 /**
   16441  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @sc: pointer to the HW structure
 *  @link: link up bool flag
   16443  *
   16444  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   16447  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16448  *  speeds in order to avoid Tx hangs.
   16449  **/
   16450 static int
   16451 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16452 {
   16453 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16454 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16455 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16456 	uint16_t phyreg;
   16457 
   16458 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   16462 		if (rv != 0)
   16463 			goto release;
   16464 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16465 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16466 		if (rv != 0)
   16467 			goto release;
   16468 		delay(20);
   16469 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16470 
   16471 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16472 		    &phyreg);
   16473 release:
   16474 		sc->phy.release(sc);
   16475 		return rv;
   16476 	}
   16477 
   16478 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16479 
   16480 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16481 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16482 	    || !link
   16483 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16484 		goto update_fextnvm6;
   16485 
   16486 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16487 
   16488 	/* Clear link status transmit timeout */
   16489 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16490 	if (speed == STATUS_SPEED_100) {
   16491 		/* Set inband Tx timeout to 5x10us for 100Half */
   16492 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16493 
   16494 		/* Do not extend the K1 entry latency for 100Half */
   16495 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16496 	} else {
   16497 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16498 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16499 
   16500 		/* Extend the K1 entry latency for 10 Mbps */
   16501 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16502 	}
   16503 
   16504 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16505 
   16506 update_fextnvm6:
   16507 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16508 	return 0;
   16509 }
   16510 
   16511 /*
   16512  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16513  *  @sc:   pointer to the HW structure
   16514  *  @link: link up bool flag
   16515  *
   16516  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
 *  If link is down, the function will restore the default K1 setting located
   16519  *  in the NVM.
   16520  */
   16521 static int
   16522 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16523 {
   16524 	int k1_enable = sc->sc_nvm_k1_enabled;
   16525 
   16526 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16527 		device_xname(sc->sc_dev), __func__));
   16528 
   16529 	if (sc->phy.acquire(sc) != 0)
   16530 		return -1;
   16531 
   16532 	if (link) {
   16533 		k1_enable = 0;
   16534 
   16535 		/* Link stall fix for link up */
   16536 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16537 		    0x0100);
   16538 	} else {
   16539 		/* Link stall fix for link down */
   16540 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16541 		    0x4100);
   16542 	}
   16543 
   16544 	wm_configure_k1_ich8lan(sc, k1_enable);
   16545 	sc->phy.release(sc);
   16546 
   16547 	return 0;
   16548 }
   16549 
   16550 /*
   16551  *  wm_k1_workaround_lv - K1 Si workaround
   16552  *  @sc:   pointer to the HW structure
   16553  *
 *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
 *  Disable K1 for the 1000 and 100 speeds.
   16556  */
   16557 static int
   16558 wm_k1_workaround_lv(struct wm_softc *sc)
   16559 {
   16560 	uint32_t reg;
   16561 	uint16_t phyreg;
   16562 	int rv;
   16563 
   16564 	if (sc->sc_type != WM_T_PCH2)
   16565 		return 0;
   16566 
   16567 	/* Set K1 beacon duration based on 10Mbps speed */
   16568 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16569 	if (rv != 0)
   16570 		return rv;
   16571 
   16572 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16573 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16574 		if (phyreg &
   16575 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   16577 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16578 			    &phyreg);
   16579 			if (rv != 0)
   16580 				return rv;
   16581 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16582 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16583 			    phyreg);
   16584 			if (rv != 0)
   16585 				return rv;
   16586 		} else {
   16587 			/* For 10Mbps */
   16588 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16589 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16590 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16591 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16592 		}
   16593 	}
   16594 
   16595 	return 0;
   16596 }
   16597 
   16598 /*
   16599  *  wm_link_stall_workaround_hv - Si workaround
   16600  *  @sc: pointer to the HW structure
   16601  *
   16602  *  This function works around a Si bug where the link partner can get
   16603  *  a link up indication before the PHY does. If small packets are sent
 *  by the link partner, they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   16606  *  further packets from being received.  The workaround is to clear the
   16607  *  packet buffer after the PHY detects link up.
   16608  */
   16609 static int
   16610 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16611 {
   16612 	uint16_t phyreg;
   16613 
   16614 	if (sc->sc_phytype != WMPHY_82578)
   16615 		return 0;
   16616 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16618 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16619 	if ((phyreg & BMCR_LOOP) != 0)
   16620 		return 0;
   16621 
   16622 	/* Check if link is up and at 1Gbps */
   16623 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16624 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16625 	    | BM_CS_STATUS_SPEED_MASK;
   16626 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16627 		| BM_CS_STATUS_SPEED_1000))
   16628 		return 0;
   16629 
   16630 	delay(200 * 1000);	/* XXX too big */
   16631 
   16632 	/* Flush the packets in the fifo buffer */
   16633 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16634 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16635 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16636 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16637 
   16638 	return 0;
   16639 }
   16640 
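/*
 *  wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
 *  @sc: pointer to the HW structure
 */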
   16641 static int
   16642 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16643 {
   16644 	int rv;
   16645 	uint16_t reg;
   16646 
   16647 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16648 	if (rv != 0)
   16649 		return rv;
   16650 
   16651 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16652 	    reg | HV_KMRN_MDIO_SLOW);
   16653 }
   16654 
   16655 /*
   16656  *  wm_configure_k1_ich8lan - Configure K1 power state
   16657  *  @sc: pointer to the HW structure
   16658  *  @enable: K1 state to configure
   16659  *
   16660  *  Configure the K1 power state based on the provided parameter.
   16661  *  Assumes semaphore already acquired.
   16662  */
   16663 static void
   16664 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16665 {
   16666 	uint32_t ctrl, ctrl_ext, tmp;
   16667 	uint16_t kmreg;
   16668 	int rv;
   16669 
   16670 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16671 
   16672 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16673 	if (rv != 0)
   16674 		return;
   16675 
   16676 	if (k1_enable)
   16677 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16678 	else
   16679 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16680 
   16681 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16682 	if (rv != 0)
   16683 		return;
   16684 
   16685 	delay(20);
   16686 
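	/*
	 * Briefly force the MAC speed setting with the speed bypass bit
	 * set, then restore the original values.  Intel's reference code
	 * performs the same (undocumented) sequence after changing the K1
	 * configuration.
	 */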
   16687 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16688 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16689 
   16690 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16691 	tmp |= CTRL_FRCSPD;
   16692 
   16693 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16694 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16695 	CSR_WRITE_FLUSH(sc);
   16696 	delay(20);
   16697 
   16698 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16699 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16700 	CSR_WRITE_FLUSH(sc);
   16701 	delay(20);
   16702 
   16703 	return;
   16704 }
   16705 
   16706 /* special case - for 82575 - need to do manual init ... */
   16707 static void
   16708 wm_reset_init_script_82575(struct wm_softc *sc)
   16709 {
   16710 	/*
	 * Remark: this is untested code - we have no board without EEPROM;
	 * it is the same setup as mentioned in the FreeBSD driver for the
	 * i82575.
   16713 	 */
   16714 
   16715 	/* SerDes configuration via SERDESCTRL */
   16716 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16717 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16718 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16719 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16720 
   16721 	/* CCM configuration via CCMCTL register */
   16722 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16723 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16724 
   16725 	/* PCIe lanes configuration */
   16726 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16727 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16728 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16729 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16730 
   16731 	/* PCIe PLL Configuration */
   16732 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16733 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16734 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16735 }
   16736 
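/*
 *  wm_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 *  @sc: pointer to the HW structure
 *
 *  Restore the MDICNFG.Destination and MDICNFG.Com_MDIO bits from the
 *  NVM's CFG3 word for this LAN function, as a device reset does not
 *  restore them from the NVM.
 */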
   16737 static void
   16738 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16739 {
   16740 	uint32_t reg;
   16741 	uint16_t nvmword;
   16742 	int rv;
   16743 
   16744 	if (sc->sc_type != WM_T_82580)
   16745 		return;
   16746 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16747 		return;
   16748 
   16749 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16750 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16751 	if (rv != 0) {
   16752 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16753 		    __func__);
   16754 		return;
   16755 	}
   16756 
   16757 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16758 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16759 		reg |= MDICNFG_DEST;
   16760 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16761 		reg |= MDICNFG_COM_MDIO;
   16762 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16763 }
   16764 
   16765 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16766 
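/*
 *  wm_phy_is_accessible_pchlan - Check if the PHY registers are accessible
 *  @sc: pointer to the HW structure
 *
 *  Returns true if the PHY ID registers can be read over MDIO, retrying
 *  in MDIO slow mode if necessary and unforcing SMBus mode on success.
 */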
   16767 static bool
   16768 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16769 {
   16770 	uint32_t reg;
   16771 	uint16_t id1, id2;
   16772 	int i, rv;
   16773 
   16774 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16775 		device_xname(sc->sc_dev), __func__));
   16776 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16777 
   16778 	id1 = id2 = 0xffff;
   16779 	for (i = 0; i < 2; i++) {
   16780 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16781 		    &id1);
   16782 		if ((rv != 0) || MII_INVALIDID(id1))
   16783 			continue;
   16784 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16785 		    &id2);
   16786 		if ((rv != 0) || MII_INVALIDID(id2))
   16787 			continue;
   16788 		break;
   16789 	}
   16790 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16791 		goto out;
   16792 
   16793 	/*
   16794 	 * In case the PHY needs to be in mdio slow mode,
   16795 	 * set slow mode and try to get the PHY id again.
   16796 	 */
   16797 	rv = 0;
   16798 	if (sc->sc_type < WM_T_PCH_LPT) {
   16799 		sc->phy.release(sc);
   16800 		wm_set_mdio_slow_mode_hv(sc);
   16801 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16802 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16803 		sc->phy.acquire(sc);
   16804 	}
   16805 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16806 		device_printf(sc->sc_dev, "XXX return with false\n");
   16807 		return false;
   16808 	}
   16809 out:
   16810 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16811 		/* Only unforce SMBus if ME is not active */
   16812 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16813 			uint16_t phyreg;
   16814 
   16815 			/* Unforce SMBus mode in PHY */
   16816 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16817 			    CV_SMB_CTRL, &phyreg);
   16818 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16819 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16820 			    CV_SMB_CTRL, phyreg);
   16821 
   16822 			/* Unforce SMBus mode in MAC */
   16823 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16824 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16825 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16826 		}
   16827 	}
   16828 	return true;
   16829 }
   16830 
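/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to regain control of it, e.g. when the MAC-PHY interconnect is
 *  left forced to SMBus mode.
 */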
   16831 static void
   16832 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16833 {
   16834 	uint32_t reg;
   16835 	int i;
   16836 
   16837 	/* Set PHY Config Counter to 50msec */
   16838 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16839 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16840 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16841 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16842 
   16843 	/* Toggle LANPHYPC */
   16844 	reg = CSR_READ(sc, WMREG_CTRL);
   16845 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16846 	reg &= ~CTRL_LANPHYPC_VALUE;
   16847 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16848 	CSR_WRITE_FLUSH(sc);
   16849 	delay(1000);
   16850 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16851 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16852 	CSR_WRITE_FLUSH(sc);
   16853 
   16854 	if (sc->sc_type < WM_T_PCH_LPT)
   16855 		delay(50 * 1000);
   16856 	else {
   16857 		i = 20;
   16858 
   16859 		do {
   16860 			delay(5 * 1000);
   16861 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16862 		    && i--);
   16863 
   16864 		delay(30 * 1000);
   16865 	}
   16866 }
   16867 
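/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on the Rx buffer size and the
 *  current link speed, and set the OBFF high water mark to match.
 */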
   16868 static int
   16869 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16870 {
   16871 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16872 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16873 	uint32_t rxa;
   16874 	uint16_t scale = 0, lat_enc = 0;
   16875 	int32_t obff_hwm = 0;
   16876 	int64_t lat_ns, value;
   16877 
   16878 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16879 		device_xname(sc->sc_dev), __func__));
   16880 
   16881 	if (link) {
   16882 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16883 		uint32_t status;
   16884 		uint16_t speed;
   16885 		pcireg_t preg;
   16886 
   16887 		status = CSR_READ(sc, WMREG_STATUS);
   16888 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16889 		case STATUS_SPEED_10:
   16890 			speed = 10;
   16891 			break;
   16892 		case STATUS_SPEED_100:
   16893 			speed = 100;
   16894 			break;
   16895 		case STATUS_SPEED_1000:
   16896 			speed = 1000;
   16897 			break;
   16898 		default:
   16899 			device_printf(sc->sc_dev, "Unknown speed "
   16900 			    "(status = %08x)\n", status);
   16901 			return -1;
   16902 		}
   16903 
   16904 		/* Rx Packet Buffer Allocation size (KB) */
   16905 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16906 
   16907 		/*
   16908 		 * Determine the maximum latency tolerated by the device.
   16909 		 *
   16910 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16911 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16912 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16913 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16914 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16915 		 */
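		/*
		 * rxa is in KB, so rxa * 1024 is the buffer size in bytes;
		 * subtract room for two maximum-sized frames, convert bytes
		 * to bits (* 8), and divide by the link speed in Mb/s with
		 * a * 1000 factor so that the result is in nanoseconds.
		 */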
   16916 		lat_ns = ((int64_t)rxa * 1024 -
   16917 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16918 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16919 		if (lat_ns < 0)
   16920 			lat_ns = 0;
   16921 		else
   16922 			lat_ns /= speed;
   16923 		value = lat_ns;
   16924 
   16925 		while (value > LTRV_VALUE) {
			scale++;
   16927 			value = howmany(value, __BIT(5));
   16928 		}
   16929 		if (scale > LTRV_SCALE_MAX) {
   16930 			device_printf(sc->sc_dev,
   16931 			    "Invalid LTR latency scale %d\n", scale);
   16932 			return -1;
   16933 		}
   16934 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16935 
   16936 		/* Determine the maximum latency tolerated by the platform */
   16937 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16938 		    WM_PCI_LTR_CAP_LPT);
   16939 		max_snoop = preg & 0xffff;
   16940 		max_nosnoop = preg >> 16;
   16941 
   16942 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16943 
   16944 		if (lat_enc > max_ltr_enc) {
   16945 			lat_enc = max_ltr_enc;
   16946 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16947 			    * PCI_LTR_SCALETONS(
   16948 				    __SHIFTOUT(lat_enc,
   16949 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16950 		}
   16951 
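		/*
		 * Convert the tolerated latency back into the amount of
		 * buffer (in KB, approximating 1024 by 1000) that drains in
		 * that time at the current link speed, and set the OBFF
		 * high water mark to the remainder of the Rx buffer.
		 */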
   16952 		if (lat_ns) {
   16953 			lat_ns *= speed * 1000;
   16954 			lat_ns /= 8;
   16955 			lat_ns /= 1000000000;
   16956 			obff_hwm = (int32_t)(rxa - lat_ns);
   16957 		}
   16958 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16959 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   16960 			    "(rxa = %d, lat_ns = %d)\n",
   16961 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16962 			return -1;
   16963 		}
   16964 	}
	/* Snoop and No-Snoop latencies are the same */
   16966 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16967 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16968 
   16969 	/* Set OBFF high water mark */
   16970 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16971 	reg |= obff_hwm;
   16972 	CSR_WRITE(sc, WMREG_SVT, reg);
   16973 
   16974 	/* Enable OBFF */
   16975 	reg = CSR_READ(sc, WMREG_SVCR);
   16976 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16977 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16978 
   16979 	return 0;
   16980 }
   16981 
   16982 /*
   16983  * I210 Errata 25 and I211 Errata 10
   16984  * Slow System Clock.
   16985  *
 * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   16987  */
   16988 static int
   16989 wm_pll_workaround_i210(struct wm_softc *sc)
   16990 {
   16991 	uint32_t mdicnfg, wuc;
   16992 	uint32_t reg;
   16993 	pcireg_t pcireg;
   16994 	uint32_t pmreg;
   16995 	uint16_t nvmword, tmp_nvmword;
   16996 	uint16_t phyval;
   16997 	bool wa_done = false;
   16998 	int i, rv = 0;
   16999 
   17000 	/* Get Power Management cap offset */
   17001 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17002 	    &pmreg, NULL) == 0)
   17003 		return -1;
   17004 
   17005 	/* Save WUC and MDICNFG registers */
   17006 	wuc = CSR_READ(sc, WMREG_WUC);
   17007 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17008 
   17009 	reg = mdicnfg & ~MDICNFG_DEST;
   17010 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17011 
   17012 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17013 		/*
   17014 		 * The default value of the Initialization Control Word 1
   17015 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17016 		 */
   17017 		nvmword = INVM_DEFAULT_AL;
   17018 	}
   17019 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17020 
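	/*
	 * Retry until the PHY's PLL frequency register no longer reads as
	 * unconfigured.  On each failure, reset the internal PHY, load the
	 * autoload word with the PLL workaround bits set into EEARBC, and
	 * bounce the device through D3hot/D0 so that the NVM autoload is
	 * re-run, per the I210/I211 errata.
	 */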
   17021 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17022 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17023 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17024 
   17025 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17026 			rv = 0;
   17027 			break; /* OK */
   17028 		} else
   17029 			rv = -1;
   17030 
   17031 		wa_done = true;
   17032 		/* Directly reset the internal PHY */
   17033 		reg = CSR_READ(sc, WMREG_CTRL);
   17034 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17035 
   17036 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17037 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17038 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17039 
   17040 		CSR_WRITE(sc, WMREG_WUC, 0);
   17041 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17042 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17043 
   17044 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17045 		    pmreg + PCI_PMCSR);
   17046 		pcireg |= PCI_PMCSR_STATE_D3;
   17047 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17048 		    pmreg + PCI_PMCSR, pcireg);
   17049 		delay(1000);
   17050 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17051 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17052 		    pmreg + PCI_PMCSR, pcireg);
   17053 
   17054 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17055 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17056 
   17057 		/* Restore WUC register */
   17058 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17059 	}
   17060 
   17061 	/* Restore MDICNFG setting */
   17062 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17063 	if (wa_done)
   17064 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17065 	return rv;
   17066 }
   17067 
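/*
 * Quirk needed when using legacy (INTx) interrupts on PCH_SPT/PCH_CNP:
 * ungate the side clock (FEXTNVM7) and disable the IOSF sideband clock
 * gating and clock request (FEXTNVM9).  The bit values follow Intel's
 * reference drivers.
 */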
   17068 static void
   17069 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17070 {
   17071 	uint32_t reg;
   17072 
   17073 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17074 		device_xname(sc->sc_dev), __func__));
   17075 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17076 	    || (sc->sc_type == WM_T_PCH_CNP));
   17077 
   17078 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17079 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17080 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17081 
   17082 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17083 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17084 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17085 }
   17086 
   17087 /* Sysctl function */
   17088 #ifdef WM_DEBUG
   17089 static int
   17090 wm_sysctl_debug(SYSCTLFN_ARGS)
   17091 {
   17092 	struct sysctlnode node = *rnode;
   17093 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17094 	uint32_t dflags;
   17095 	int error;
   17096 
   17097 	dflags = sc->sc_debug;
   17098 	node.sysctl_data = &dflags;
   17099 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17100 
   17101 	if (error || newp == NULL)
   17102 		return error;
   17103 
   17104 	sc->sc_debug = dflags;
   17105 
   17106 	return 0;
   17107 }
   17108 #endif
   17109