/*	$NetBSD: if_wm.c,v 1.714 2021/10/20 08:06:45 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.714 2021/10/20 08:06:45 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
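
/*
 * Illustrative usage of DPRINTF() (an editorial sketch, not part of the
 * original source): the second argument selects a debug class and the
 * third is a parenthesized printf(9) argument list.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif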

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts that this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
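
/*
 * Illustrative sketch (not part of the original source): because the ring
 * sizes are powers of two, advancing an index wraps with a mask rather
 * than a modulo, e.g.:
 */
#if 0
	txq->txq_next = WM_NEXTTX(txq, txq->txq_next); /* (next + 1) % ndesc */
#endif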

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
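
/*
 * Editorial note (a sketch of how the macros above expand, not part of
 * the original source): WM_Q_EVCNT_DEFINE(txq, tso) declares
 *
 *	char txq_tso_evcnt_name[...];
 *	struct evcnt txq_ev_tso;
 *
 * and WM_Q_EVCNT_ATTACH() builds a name such as "txq00tso" before
 * registering the counter with evcnt_attach_dynamic(9).
 */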

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
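
/*
 * Illustrative sketch (not part of the original source): rxq_tailp always
 * points at the m_next field of the last mbuf in the chain (or at rxq_head
 * when the chain is empty), so appending a buffer is O(1):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m); /* rxq_head == m, rxq_tailp == &m->m_next */
#endif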

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
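
/*
 * Editorial note (not part of the original source): WM_Q_EVCNT_INCR(txq,
 * tso) bumps txq->txq_ev_tso.ev_count; where __HAVE_ATOMIC64_LOADSTORE is
 * defined, the relaxed atomic load/store pair keeps concurrent readers
 * from observing a torn 64-bit counter value.
 */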

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
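
/*
 * Illustrative usage (an editorial sketch, not from the original source;
 * WMREG_IMC assumed from if_wmreg.h): CSR_WRITE_FLUSH() forces a posted
 * write to complete by reading back an unrelated register, e.g.:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask all interrupts */
	CSR_WRITE_FLUSH(sc);			/* post the write */
#endif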

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

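/*
 * Illustrative sketch (not part of the original source; register names
 * assumed from if_wmreg.h): a ring base address is split into 32-bit
 * halves for the TDBAL/TDBAH-style register pairs:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
#endif
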
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM (2) Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM (3) Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM (4) Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM (5) Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM (6) Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM (7) Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM (8) Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM (9) Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM (10) Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM (11) Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM (12) Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM (13) Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM (14) Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM (15) Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1644 	  "I219 LM (16) Ethernet Connection",
   1645 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1647 	  "I219 LM (17) Ethernet Connection",
   1648 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1650 	  "I219 LM (18) Ethernet Connection",
   1651 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1653 	  "I219 LM (19) Ethernet Connection",
   1654 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1659 	  "I219 V (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1662 	  "I219 V (4) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1665 	  "I219 V (5) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1668 	  "I219 V (6) Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1671 	  "I219 V (7) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1674 	  "I219 V (8) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1677 	  "I219 V (9) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1680 	  "I219 V (10) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1683 	  "I219 V (11) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1686 	  "I219 V (12) Ethernet Connection",
   1687 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1689 	  "I219 V (13) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1692 	  "I219 V (14) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1695 	  "I219 V (15) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1698 	  "I219 V (16) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1701 	  "I219 V (17) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1704 	  "I219 V (18) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1707 	  "I219 V (19) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
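         	/* Terminator entry; wm_lookup() stops at the NULL name. */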
   1709 	{ 0,			0,
   1710 	  NULL,
   1711 	  0,			0 },
   1712 };
   1713 
   1714 /*
    1715  * Register read/write functions,
    1716  * other than CSR_{READ|WRITE}().
   1717  */
   1718 
   1719 #if 0 /* Not currently used */
   1720 static inline uint32_t
   1721 wm_io_read(struct wm_softc *sc, int reg)
   1722 {
   1723 
   1724 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1725 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1726 }
   1727 #endif
   1728 
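         /*
          * Indirect register write through the I/O-mapped window:
          * offset 0 selects the register, offset 4 carries the data.
          */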
   1729 static inline void
   1730 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1731 {
   1732 
   1733 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1734 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1735 }
   1736 
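         /*
          * Write an 8-bit value into an indirectly addressed controller
          * register and poll until the interface reports ready, warning
          * on timeout.
          */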
   1737 static inline void
   1738 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1739     uint32_t data)
   1740 {
   1741 	uint32_t regval;
   1742 	int i;
   1743 
   1744 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1745 
   1746 	CSR_WRITE(sc, reg, regval);
   1747 
   1748 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1749 		delay(5);
   1750 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1751 			break;
   1752 	}
   1753 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1754 		aprint_error("%s: WARNING:"
   1755 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1756 		    device_xname(sc->sc_dev), reg);
   1757 	}
   1758 }
   1759 
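         /*
          * Store a DMA address into the two little-endian 32-bit halves
          * of a wiseman address; the high half is non-zero only when
          * bus addresses are 64-bit.
          */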
   1760 static inline void
   1761 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1762 {
   1763 	wa->wa_low = htole32(v & 0xffffffffU);
   1764 	if (sizeof(bus_addr_t) == 8)
   1765 		wa->wa_high = htole32((uint64_t) v >> 32);
   1766 	else
   1767 		wa->wa_high = 0;
   1768 }
   1769 
   1770 /*
   1771  * Descriptor sync/init functions.
   1772  */
   1773 static inline void
   1774 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1775 {
   1776 	struct wm_softc *sc = txq->txq_sc;
   1777 
   1778 	/* If it will wrap around, sync to the end of the ring. */
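         	/*
         	 * (e.g., on a 256-descriptor ring, start = 250 and num = 10
         	 * sync descriptors 250-255 here, then 0-3 below.)
         	 */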
   1779 	if ((start + num) > WM_NTXDESC(txq)) {
   1780 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1781 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1782 		    (WM_NTXDESC(txq) - start), ops);
   1783 		num -= (WM_NTXDESC(txq) - start);
   1784 		start = 0;
   1785 	}
   1786 
   1787 	/* Now sync whatever is left. */
   1788 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1789 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1790 }
   1791 
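         /* Sync a single Rx descriptor at index "start". */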
   1792 static inline void
   1793 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1794 {
   1795 	struct wm_softc *sc = rxq->rxq_sc;
   1796 
   1797 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1798 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1799 }
   1800 
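         /*
          * Initialize the Rx descriptor at "start" from its mbuf's DMA
          * map and hand ownership to the hardware.
          */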
   1801 static inline void
   1802 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1803 {
   1804 	struct wm_softc *sc = rxq->rxq_sc;
   1805 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1806 	struct mbuf *m = rxs->rxs_mbuf;
   1807 
   1808 	/*
   1809 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1810 	 * so that the payload after the Ethernet header is aligned
   1811 	 * to a 4-byte boundary.
    1812 	 *
   1813 	 * XXX BRAINDAMAGE ALERT!
   1814 	 * The stupid chip uses the same size for every buffer, which
   1815 	 * is set in the Receive Control register.  We are using the 2K
   1816 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1817 	 * reason, we can't "scoot" packets longer than the standard
   1818 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1819 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1820 	 * the upper layer copy the headers.
   1821 	 */
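         	/*
         	 * (e.g., with a 2-byte tweak the 14-byte Ethernet header
         	 * occupies offsets 2-15, so the IP header starts 4-byte
         	 * aligned at offset 16.)
         	 */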
   1822 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1823 
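         	/*
         	 * Three descriptor formats: the 82574 uses extended Rx
         	 * descriptors, chips with WM_F_NEWQUEUE set use the "nq"
         	 * format, and everything else uses the legacy wiseman
         	 * format.
         	 */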
   1824 	if (sc->sc_type == WM_T_82574) {
   1825 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1826 		rxd->erx_data.erxd_addr =
   1827 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1828 		rxd->erx_data.erxd_dd = 0;
   1829 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1830 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1831 
   1832 		rxd->nqrx_data.nrxd_paddr =
   1833 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1834 		/* Currently, split header is not supported. */
   1835 		rxd->nqrx_data.nrxd_haddr = 0;
   1836 	} else {
   1837 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1838 
   1839 		wm_set_dma_addr(&rxd->wrx_addr,
   1840 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1841 		rxd->wrx_len = 0;
   1842 		rxd->wrx_cksum = 0;
   1843 		rxd->wrx_status = 0;
   1844 		rxd->wrx_errors = 0;
   1845 		rxd->wrx_special = 0;
   1846 	}
   1847 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1848 
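         	/* Pass the descriptor to the chip by updating the tail. */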
   1849 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1850 }
   1851 
   1852 /*
   1853  * Device driver interface functions and commonly used functions.
   1854  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1855  */
   1856 
   1857 /* Lookup supported device table */
   1858 static const struct wm_product *
   1859 wm_lookup(const struct pci_attach_args *pa)
   1860 {
   1861 	const struct wm_product *wmp;
   1862 
   1863 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1864 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1865 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1866 			return wmp;
   1867 	}
   1868 	return NULL;
   1869 }
   1870 
   1871 /* The match function (ca_match) */
   1872 static int
   1873 wm_match(device_t parent, cfdata_t cf, void *aux)
   1874 {
   1875 	struct pci_attach_args *pa = aux;
   1876 
   1877 	if (wm_lookup(pa) != NULL)
   1878 		return 1;
   1879 
   1880 	return 0;
   1881 }
   1882 
   1883 /* The attach function (ca_attach) */
   1884 static void
   1885 wm_attach(device_t parent, device_t self, void *aux)
   1886 {
   1887 	struct wm_softc *sc = device_private(self);
   1888 	struct pci_attach_args *pa = aux;
   1889 	prop_dictionary_t dict;
   1890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1891 	pci_chipset_tag_t pc = pa->pa_pc;
   1892 	int counts[PCI_INTR_TYPE_SIZE];
   1893 	pci_intr_type_t max_type;
   1894 	const char *eetype, *xname;
   1895 	bus_space_tag_t memt;
   1896 	bus_space_handle_t memh;
   1897 	bus_size_t memsize;
   1898 	int memh_valid;
   1899 	int i, error;
   1900 	const struct wm_product *wmp;
   1901 	prop_data_t ea;
   1902 	prop_number_t pn;
   1903 	uint8_t enaddr[ETHER_ADDR_LEN];
   1904 	char buf[256];
   1905 	char wqname[MAXCOMLEN];
   1906 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1907 	pcireg_t preg, memtype;
   1908 	uint16_t eeprom_data, apme_mask;
   1909 	bool force_clear_smbi;
   1910 	uint32_t link_mode;
   1911 	uint32_t reg;
   1912 
   1913 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1914 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1915 #endif
   1916 	sc->sc_dev = self;
   1917 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1918 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1919 	sc->sc_core_stopping = false;
   1920 
   1921 	wmp = wm_lookup(pa);
   1922 #ifdef DIAGNOSTIC
   1923 	if (wmp == NULL) {
   1924 		printf("\n");
   1925 		panic("wm_attach: impossible");
   1926 	}
   1927 #endif
   1928 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1929 
   1930 	sc->sc_pc = pa->pa_pc;
   1931 	sc->sc_pcitag = pa->pa_tag;
   1932 
   1933 	if (pci_dma64_available(pa))
   1934 		sc->sc_dmat = pa->pa_dmat64;
   1935 	else
   1936 		sc->sc_dmat = pa->pa_dmat;
   1937 
   1938 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1939 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1940 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1941 
   1942 	sc->sc_type = wmp->wmp_type;
   1943 
   1944 	/* Set default function pointers */
   1945 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1946 	sc->phy.release = sc->nvm.release = wm_put_null;
   1947 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1948 
   1949 	if (sc->sc_type < WM_T_82543) {
   1950 		if (sc->sc_rev < 2) {
   1951 			aprint_error_dev(sc->sc_dev,
   1952 			    "i82542 must be at least rev. 2\n");
   1953 			return;
   1954 		}
   1955 		if (sc->sc_rev < 3)
   1956 			sc->sc_type = WM_T_82542_2_0;
   1957 	}
   1958 
   1959 	/*
   1960 	 * Disable MSI for Errata:
   1961 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1962 	 *
   1963 	 *  82544: Errata 25
   1964 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1965 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1966 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1967 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1968 	 *
   1969 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1970 	 *
   1971 	 *  82571 & 82572: Errata 63
   1972 	 */
   1973 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1974 	    || (sc->sc_type == WM_T_82572))
   1975 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1976 
   1977 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1978 	    || (sc->sc_type == WM_T_82580)
   1979 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1980 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1981 		sc->sc_flags |= WM_F_NEWQUEUE;
   1982 
   1983 	/* Set device properties (mactype) */
   1984 	dict = device_properties(sc->sc_dev);
   1985 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1986 
   1987 	/*
    1988 	 * Map the device.  All devices support memory-mapped access,
   1989 	 * and it is really required for normal operation.
   1990 	 */
   1991 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1992 	switch (memtype) {
   1993 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1994 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1995 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1996 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1997 		break;
   1998 	default:
   1999 		memh_valid = 0;
   2000 		break;
   2001 	}
   2002 
   2003 	if (memh_valid) {
   2004 		sc->sc_st = memt;
   2005 		sc->sc_sh = memh;
   2006 		sc->sc_ss = memsize;
   2007 	} else {
   2008 		aprint_error_dev(sc->sc_dev,
   2009 		    "unable to map device registers\n");
   2010 		return;
   2011 	}
   2012 
   2013 	/*
   2014 	 * In addition, i82544 and later support I/O mapped indirect
   2015 	 * register access.  It is not desirable (nor supported in
   2016 	 * this driver) to use it for normal operation, though it is
   2017 	 * required to work around bugs in some chip versions.
   2018 	 */
   2019 	switch (sc->sc_type) {
   2020 	case WM_T_82544:
   2021 	case WM_T_82541:
   2022 	case WM_T_82541_2:
   2023 	case WM_T_82547:
   2024 	case WM_T_82547_2:
   2025 		/* First we have to find the I/O BAR. */
   2026 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2027 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2028 			if (memtype == PCI_MAPREG_TYPE_IO)
   2029 				break;
   2030 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2031 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2032 				i += 4;	/* skip high bits, too */
   2033 		}
   2034 		if (i < PCI_MAPREG_END) {
   2035 			/*
    2036 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2037 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR;
    2038 			 * that's not a problem, because those newer chips
    2039 			 * don't have this bug.
    2040 			 *
    2041 			 * The i8254x apparently doesn't respond when the
    2042 			 * I/O BAR is 0, which makes it look as if it hasn't
    2043 			 * been configured.
   2044 			 */
   2045 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2046 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2047 				aprint_error_dev(sc->sc_dev,
   2048 				    "WARNING: I/O BAR at zero.\n");
   2049 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2050 					0, &sc->sc_iot, &sc->sc_ioh,
   2051 					NULL, &sc->sc_ios) == 0) {
   2052 				sc->sc_flags |= WM_F_IOH_VALID;
   2053 			} else
   2054 				aprint_error_dev(sc->sc_dev,
   2055 				    "WARNING: unable to map I/O space\n");
   2056 		}
   2057 		break;
   2058 	default:
   2059 		break;
   2060 	}
   2061 
   2062 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2063 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2064 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2065 	if (sc->sc_type < WM_T_82542_2_1)
   2066 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2067 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2068 
   2069 	/* Power up chip */
   2070 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2071 	    && error != EOPNOTSUPP) {
   2072 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2073 		return;
   2074 	}
   2075 
   2076 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2077 	/*
    2078 	 * If we can use only one queue, don't use MSI-X, to save
    2079 	 * interrupt resources.
   2080 	 */
   2081 	if (sc->sc_nqueues > 1) {
   2082 		max_type = PCI_INTR_TYPE_MSIX;
   2083 		/*
    2084 		 * The 82583 has an MSI-X capability in its PCI configuration
    2085 		 * space but doesn't actually support it; at least the
    2086 		 * documentation says nothing about MSI-X.
   2087 		 */
   2088 		counts[PCI_INTR_TYPE_MSIX]
   2089 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2090 	} else {
   2091 		max_type = PCI_INTR_TYPE_MSI;
   2092 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2093 	}
   2094 
   2095 	/* Allocation settings */
   2096 	counts[PCI_INTR_TYPE_MSI] = 1;
   2097 	counts[PCI_INTR_TYPE_INTX] = 1;
    2098 	/* Overridden by the wm_disable_* flags */
   2099 	if (wm_disable_msi != 0) {
   2100 		counts[PCI_INTR_TYPE_MSI] = 0;
   2101 		if (wm_disable_msix != 0) {
   2102 			max_type = PCI_INTR_TYPE_INTX;
   2103 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2104 		}
   2105 	} else if (wm_disable_msix != 0) {
   2106 		max_type = PCI_INTR_TYPE_MSI;
   2107 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2108 	}
   2109 
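         	/*
         	 * Allocate interrupts, falling back from MSI-X to MSI to
         	 * INTx when setup of the more capable type fails.
         	 */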
   2110 alloc_retry:
   2111 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2112 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2113 		return;
   2114 	}
   2115 
   2116 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2117 		error = wm_setup_msix(sc);
   2118 		if (error) {
   2119 			pci_intr_release(pc, sc->sc_intrs,
   2120 			    counts[PCI_INTR_TYPE_MSIX]);
   2121 
   2122 			/* Setup for MSI: Disable MSI-X */
   2123 			max_type = PCI_INTR_TYPE_MSI;
   2124 			counts[PCI_INTR_TYPE_MSI] = 1;
   2125 			counts[PCI_INTR_TYPE_INTX] = 1;
   2126 			goto alloc_retry;
   2127 		}
   2128 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2129 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2130 		error = wm_setup_legacy(sc);
   2131 		if (error) {
   2132 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2133 			    counts[PCI_INTR_TYPE_MSI]);
   2134 
   2135 			/* The next try is for INTx: Disable MSI */
   2136 			max_type = PCI_INTR_TYPE_INTX;
   2137 			counts[PCI_INTR_TYPE_INTX] = 1;
   2138 			goto alloc_retry;
   2139 		}
   2140 	} else {
   2141 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2142 		error = wm_setup_legacy(sc);
   2143 		if (error) {
   2144 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2145 			    counts[PCI_INTR_TYPE_INTX]);
   2146 			return;
   2147 		}
   2148 	}
   2149 
   2150 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2151 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2152 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2153 	    WM_WORKQUEUE_FLAGS);
   2154 	if (error) {
   2155 		aprint_error_dev(sc->sc_dev,
   2156 		    "unable to create workqueue\n");
   2157 		goto out;
   2158 	}
   2159 
   2160 	/*
   2161 	 * Check the function ID (unit number of the chip).
   2162 	 */
   2163 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2164 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2165 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2166 	    || (sc->sc_type == WM_T_82580)
   2167 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2168 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2169 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2170 	else
   2171 		sc->sc_funcid = 0;
   2172 
   2173 	/*
   2174 	 * Determine a few things about the bus we're connected to.
   2175 	 */
   2176 	if (sc->sc_type < WM_T_82543) {
   2177 		/* We don't really know the bus characteristics here. */
   2178 		sc->sc_bus_speed = 33;
   2179 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2180 		/*
    2181 		 * CSA (Communication Streaming Architecture) is about as
    2182 		 * fast as a 32-bit 66MHz PCI bus.
   2183 		 */
   2184 		sc->sc_flags |= WM_F_CSA;
   2185 		sc->sc_bus_speed = 66;
   2186 		aprint_verbose_dev(sc->sc_dev,
   2187 		    "Communication Streaming Architecture\n");
   2188 		if (sc->sc_type == WM_T_82547) {
   2189 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2190 			callout_setfunc(&sc->sc_txfifo_ch,
   2191 			    wm_82547_txfifo_stall, sc);
   2192 			aprint_verbose_dev(sc->sc_dev,
   2193 			    "using 82547 Tx FIFO stall work-around\n");
   2194 		}
   2195 	} else if (sc->sc_type >= WM_T_82571) {
   2196 		sc->sc_flags |= WM_F_PCIE;
   2197 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2198 		    && (sc->sc_type != WM_T_ICH10)
   2199 		    && (sc->sc_type != WM_T_PCH)
   2200 		    && (sc->sc_type != WM_T_PCH2)
   2201 		    && (sc->sc_type != WM_T_PCH_LPT)
   2202 		    && (sc->sc_type != WM_T_PCH_SPT)
   2203 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2204 			/* ICH* and PCH* have no PCIe capability registers */
   2205 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2206 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2207 				NULL) == 0)
   2208 				aprint_error_dev(sc->sc_dev,
   2209 				    "unable to find PCIe capability\n");
   2210 		}
   2211 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2212 	} else {
   2213 		reg = CSR_READ(sc, WMREG_STATUS);
   2214 		if (reg & STATUS_BUS64)
   2215 			sc->sc_flags |= WM_F_BUS64;
   2216 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2217 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2218 
   2219 			sc->sc_flags |= WM_F_PCIX;
   2220 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2221 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2222 				aprint_error_dev(sc->sc_dev,
   2223 				    "unable to find PCIX capability\n");
   2224 			else if (sc->sc_type != WM_T_82545_3 &&
   2225 				 sc->sc_type != WM_T_82546_3) {
   2226 				/*
   2227 				 * Work around a problem caused by the BIOS
   2228 				 * setting the max memory read byte count
   2229 				 * incorrectly.
   2230 				 */
   2231 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2232 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2233 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2234 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2235 
   2236 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2237 				    PCIX_CMD_BYTECNT_SHIFT;
   2238 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2239 				    PCIX_STATUS_MAXB_SHIFT;
   2240 				if (bytecnt > maxb) {
   2241 					aprint_verbose_dev(sc->sc_dev,
   2242 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2243 					    512 << bytecnt, 512 << maxb);
   2244 					pcix_cmd = (pcix_cmd &
   2245 					    ~PCIX_CMD_BYTECNT_MASK) |
   2246 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2247 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2248 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2249 					    pcix_cmd);
   2250 				}
   2251 			}
   2252 		}
   2253 		/*
   2254 		 * The quad port adapter is special; it has a PCIX-PCIX
   2255 		 * bridge on the board, and can run the secondary bus at
   2256 		 * a higher speed.
   2257 		 */
   2258 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2259 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2260 								      : 66;
   2261 		} else if (sc->sc_flags & WM_F_PCIX) {
   2262 			switch (reg & STATUS_PCIXSPD_MASK) {
   2263 			case STATUS_PCIXSPD_50_66:
   2264 				sc->sc_bus_speed = 66;
   2265 				break;
   2266 			case STATUS_PCIXSPD_66_100:
   2267 				sc->sc_bus_speed = 100;
   2268 				break;
   2269 			case STATUS_PCIXSPD_100_133:
   2270 				sc->sc_bus_speed = 133;
   2271 				break;
   2272 			default:
   2273 				aprint_error_dev(sc->sc_dev,
   2274 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2275 				    reg & STATUS_PCIXSPD_MASK);
   2276 				sc->sc_bus_speed = 66;
   2277 				break;
   2278 			}
   2279 		} else
   2280 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2281 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2282 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2283 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2284 	}
   2285 
   2286 	/* clear interesting stat counters */
   2287 	CSR_READ(sc, WMREG_COLC);
   2288 	CSR_READ(sc, WMREG_RXERRC);
   2289 
   2290 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2291 	    || (sc->sc_type >= WM_T_ICH8))
   2292 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2293 	if (sc->sc_type >= WM_T_ICH8)
   2294 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2295 
   2296 	/* Set PHY, NVM mutex related stuff */
   2297 	switch (sc->sc_type) {
   2298 	case WM_T_82542_2_0:
   2299 	case WM_T_82542_2_1:
   2300 	case WM_T_82543:
   2301 	case WM_T_82544:
   2302 		/* Microwire */
   2303 		sc->nvm.read = wm_nvm_read_uwire;
   2304 		sc->sc_nvm_wordsize = 64;
   2305 		sc->sc_nvm_addrbits = 6;
   2306 		break;
   2307 	case WM_T_82540:
   2308 	case WM_T_82545:
   2309 	case WM_T_82545_3:
   2310 	case WM_T_82546:
   2311 	case WM_T_82546_3:
   2312 		/* Microwire */
   2313 		sc->nvm.read = wm_nvm_read_uwire;
   2314 		reg = CSR_READ(sc, WMREG_EECD);
   2315 		if (reg & EECD_EE_SIZE) {
   2316 			sc->sc_nvm_wordsize = 256;
   2317 			sc->sc_nvm_addrbits = 8;
   2318 		} else {
   2319 			sc->sc_nvm_wordsize = 64;
   2320 			sc->sc_nvm_addrbits = 6;
   2321 		}
   2322 		sc->sc_flags |= WM_F_LOCK_EECD;
   2323 		sc->nvm.acquire = wm_get_eecd;
   2324 		sc->nvm.release = wm_put_eecd;
   2325 		break;
   2326 	case WM_T_82541:
   2327 	case WM_T_82541_2:
   2328 	case WM_T_82547:
   2329 	case WM_T_82547_2:
   2330 		reg = CSR_READ(sc, WMREG_EECD);
   2331 		/*
    2332 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2333 		 * 8254[17], so set the flags and functions before calling it.
   2334 		 */
   2335 		sc->sc_flags |= WM_F_LOCK_EECD;
   2336 		sc->nvm.acquire = wm_get_eecd;
   2337 		sc->nvm.release = wm_put_eecd;
   2338 		if (reg & EECD_EE_TYPE) {
   2339 			/* SPI */
   2340 			sc->nvm.read = wm_nvm_read_spi;
   2341 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2342 			wm_nvm_set_addrbits_size_eecd(sc);
   2343 		} else {
   2344 			/* Microwire */
   2345 			sc->nvm.read = wm_nvm_read_uwire;
   2346 			if ((reg & EECD_EE_ABITS) != 0) {
   2347 				sc->sc_nvm_wordsize = 256;
   2348 				sc->sc_nvm_addrbits = 8;
   2349 			} else {
   2350 				sc->sc_nvm_wordsize = 64;
   2351 				sc->sc_nvm_addrbits = 6;
   2352 			}
   2353 		}
   2354 		break;
   2355 	case WM_T_82571:
   2356 	case WM_T_82572:
   2357 		/* SPI */
   2358 		sc->nvm.read = wm_nvm_read_eerd;
    2359 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2360 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2361 		wm_nvm_set_addrbits_size_eecd(sc);
   2362 		sc->phy.acquire = wm_get_swsm_semaphore;
   2363 		sc->phy.release = wm_put_swsm_semaphore;
   2364 		sc->nvm.acquire = wm_get_nvm_82571;
   2365 		sc->nvm.release = wm_put_nvm_82571;
   2366 		break;
   2367 	case WM_T_82573:
   2368 	case WM_T_82574:
   2369 	case WM_T_82583:
   2370 		sc->nvm.read = wm_nvm_read_eerd;
    2371 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 		if (sc->sc_type == WM_T_82573) {
   2373 			sc->phy.acquire = wm_get_swsm_semaphore;
   2374 			sc->phy.release = wm_put_swsm_semaphore;
   2375 			sc->nvm.acquire = wm_get_nvm_82571;
   2376 			sc->nvm.release = wm_put_nvm_82571;
   2377 		} else {
   2378 			/* Both PHY and NVM use the same semaphore. */
   2379 			sc->phy.acquire = sc->nvm.acquire
   2380 			    = wm_get_swfwhw_semaphore;
   2381 			sc->phy.release = sc->nvm.release
   2382 			    = wm_put_swfwhw_semaphore;
   2383 		}
   2384 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2385 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2386 			sc->sc_nvm_wordsize = 2048;
   2387 		} else {
   2388 			/* SPI */
   2389 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2390 			wm_nvm_set_addrbits_size_eecd(sc);
   2391 		}
   2392 		break;
   2393 	case WM_T_82575:
   2394 	case WM_T_82576:
   2395 	case WM_T_82580:
   2396 	case WM_T_I350:
   2397 	case WM_T_I354:
   2398 	case WM_T_80003:
   2399 		/* SPI */
   2400 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2401 		wm_nvm_set_addrbits_size_eecd(sc);
   2402 		if ((sc->sc_type == WM_T_80003)
   2403 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2404 			sc->nvm.read = wm_nvm_read_eerd;
   2405 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2406 		} else {
   2407 			sc->nvm.read = wm_nvm_read_spi;
   2408 			sc->sc_flags |= WM_F_LOCK_EECD;
   2409 		}
   2410 		sc->phy.acquire = wm_get_phy_82575;
   2411 		sc->phy.release = wm_put_phy_82575;
   2412 		sc->nvm.acquire = wm_get_nvm_80003;
   2413 		sc->nvm.release = wm_put_nvm_80003;
   2414 		break;
   2415 	case WM_T_ICH8:
   2416 	case WM_T_ICH9:
   2417 	case WM_T_ICH10:
   2418 	case WM_T_PCH:
   2419 	case WM_T_PCH2:
   2420 	case WM_T_PCH_LPT:
   2421 		sc->nvm.read = wm_nvm_read_ich8;
   2422 		/* FLASH */
   2423 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2424 		sc->sc_nvm_wordsize = 2048;
   2425 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2426 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2427 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2428 			aprint_error_dev(sc->sc_dev,
   2429 			    "can't map FLASH registers\n");
   2430 			goto out;
   2431 		}
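         		/*
         		 * Derive the flash base and bank size from GFPREG: the
         		 * masked low bits give the first sector of the flash
         		 * region, the bits above 16 the last; the span is
         		 * converted from sectors to bytes, then to 16-bit words
         		 * split across the (assumed) two banks.
         		 */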
   2432 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2433 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2434 		    ICH_FLASH_SECTOR_SIZE;
   2435 		sc->sc_ich8_flash_bank_size =
   2436 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2437 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2438 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2439 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2440 		sc->sc_flashreg_offset = 0;
   2441 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2442 		sc->phy.release = wm_put_swflag_ich8lan;
   2443 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2444 		sc->nvm.release = wm_put_nvm_ich8lan;
   2445 		break;
   2446 	case WM_T_PCH_SPT:
   2447 	case WM_T_PCH_CNP:
   2448 		sc->nvm.read = wm_nvm_read_spt;
   2449 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2450 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2451 		sc->sc_flasht = sc->sc_st;
   2452 		sc->sc_flashh = sc->sc_sh;
   2453 		sc->sc_ich8_flash_base = 0;
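         		/*
         		 * NVM size from the STRAP register: bits 1-5, plus one,
         		 * in units of NVM_SIZE_MULTIPLIER bytes.
         		 */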
   2454 		sc->sc_nvm_wordsize =
   2455 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2456 		    * NVM_SIZE_MULTIPLIER;
    2457 		/* That is the size in bytes; we want words */
   2458 		sc->sc_nvm_wordsize /= 2;
   2459 		/* Assume 2 banks */
   2460 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2461 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2462 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2463 		sc->phy.release = wm_put_swflag_ich8lan;
   2464 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2465 		sc->nvm.release = wm_put_nvm_ich8lan;
   2466 		break;
   2467 	case WM_T_I210:
   2468 	case WM_T_I211:
    2469 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2470 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2471 		if (wm_nvm_flash_presence_i210(sc)) {
   2472 			sc->nvm.read = wm_nvm_read_eerd;
   2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2474 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2475 			wm_nvm_set_addrbits_size_eecd(sc);
   2476 		} else {
   2477 			sc->nvm.read = wm_nvm_read_invm;
   2478 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2479 			sc->sc_nvm_wordsize = INVM_SIZE;
   2480 		}
   2481 		sc->phy.acquire = wm_get_phy_82575;
   2482 		sc->phy.release = wm_put_phy_82575;
   2483 		sc->nvm.acquire = wm_get_nvm_80003;
   2484 		sc->nvm.release = wm_put_nvm_80003;
   2485 		break;
   2486 	default:
   2487 		break;
   2488 	}
   2489 
   2490 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2491 	switch (sc->sc_type) {
   2492 	case WM_T_82571:
   2493 	case WM_T_82572:
   2494 		reg = CSR_READ(sc, WMREG_SWSM2);
   2495 		if ((reg & SWSM2_LOCK) == 0) {
   2496 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2497 			force_clear_smbi = true;
   2498 		} else
   2499 			force_clear_smbi = false;
   2500 		break;
   2501 	case WM_T_82573:
   2502 	case WM_T_82574:
   2503 	case WM_T_82583:
   2504 		force_clear_smbi = true;
   2505 		break;
   2506 	default:
   2507 		force_clear_smbi = false;
   2508 		break;
   2509 	}
   2510 	if (force_clear_smbi) {
   2511 		reg = CSR_READ(sc, WMREG_SWSM);
   2512 		if ((reg & SWSM_SMBI) != 0)
   2513 			aprint_error_dev(sc->sc_dev,
   2514 			    "Please update the Bootagent\n");
   2515 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2516 	}
   2517 
   2518 	/*
    2519 	 * Defer printing the EEPROM type until after verifying the
    2520 	 * checksum.  This allows the EEPROM type to be printed correctly
    2521 	 * when no EEPROM is attached.
   2522 	 */
   2523 	/*
   2524 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2525 	 * this for later, so we can fail future reads from the EEPROM.
   2526 	 */
   2527 	if (wm_nvm_validate_checksum(sc)) {
   2528 		/*
    2529 		 * Check again, because some PCIe parts fail the first
    2530 		 * check due to the link being in a sleep state.
   2531 		 */
   2532 		if (wm_nvm_validate_checksum(sc))
   2533 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2534 	}
   2535 
   2536 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2537 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2538 	else {
   2539 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2540 		    sc->sc_nvm_wordsize);
   2541 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2542 			aprint_verbose("iNVM");
   2543 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2544 			aprint_verbose("FLASH(HW)");
   2545 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2546 			aprint_verbose("FLASH");
   2547 		else {
   2548 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2549 				eetype = "SPI";
   2550 			else
   2551 				eetype = "MicroWire";
   2552 			aprint_verbose("(%d address bits) %s EEPROM",
   2553 			    sc->sc_nvm_addrbits, eetype);
   2554 		}
   2555 	}
   2556 	wm_nvm_version(sc);
   2557 	aprint_verbose("\n");
   2558 
   2559 	/*
    2560 	 * XXX The first call to wm_gmii_setup_phytype. The result might be
   2561 	 * incorrect.
   2562 	 */
   2563 	wm_gmii_setup_phytype(sc, 0, 0);
   2564 
   2565 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2566 	switch (sc->sc_type) {
   2567 	case WM_T_ICH8:
   2568 	case WM_T_ICH9:
   2569 	case WM_T_ICH10:
   2570 	case WM_T_PCH:
   2571 	case WM_T_PCH2:
   2572 	case WM_T_PCH_LPT:
   2573 	case WM_T_PCH_SPT:
   2574 	case WM_T_PCH_CNP:
   2575 		apme_mask = WUC_APME;
   2576 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2577 		if ((eeprom_data & apme_mask) != 0)
   2578 			sc->sc_flags |= WM_F_WOL;
   2579 		break;
   2580 	default:
   2581 		break;
   2582 	}
   2583 
   2584 	/* Reset the chip to a known state. */
   2585 	wm_reset(sc);
   2586 
   2587 	/*
   2588 	 * Check for I21[01] PLL workaround.
   2589 	 *
   2590 	 * Three cases:
   2591 	 * a) Chip is I211.
   2592 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2593 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2594 	 */
   2595 	if (sc->sc_type == WM_T_I211)
   2596 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2597 	if (sc->sc_type == WM_T_I210) {
   2598 		if (!wm_nvm_flash_presence_i210(sc))
   2599 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2600 		else if ((sc->sc_nvm_ver_major < 3)
   2601 		    || ((sc->sc_nvm_ver_major == 3)
   2602 			&& (sc->sc_nvm_ver_minor < 25))) {
   2603 			aprint_verbose_dev(sc->sc_dev,
   2604 			    "ROM image version %d.%d is older than 3.25\n",
   2605 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2606 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2607 		}
   2608 	}
   2609 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2610 		wm_pll_workaround_i210(sc);
   2611 
   2612 	wm_get_wakeup(sc);
   2613 
   2614 	/* Non-AMT based hardware can now take control from firmware */
   2615 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2616 		wm_get_hw_control(sc);
   2617 
   2618 	/*
   2619 	 * Read the Ethernet address from the EEPROM, if not first found
   2620 	 * in device properties.
   2621 	 */
   2622 	ea = prop_dictionary_get(dict, "mac-address");
   2623 	if (ea != NULL) {
   2624 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2625 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2626 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2627 	} else {
   2628 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2629 			aprint_error_dev(sc->sc_dev,
   2630 			    "unable to read Ethernet address\n");
   2631 			goto out;
   2632 		}
   2633 	}
   2634 
   2635 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2636 	    ether_sprintf(enaddr));
   2637 
   2638 	/*
   2639 	 * Read the config info from the EEPROM, and set up various
   2640 	 * bits in the control registers based on their contents.
   2641 	 */
   2642 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2643 	if (pn != NULL) {
   2644 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2645 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2646 	} else {
   2647 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2648 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2649 			goto out;
   2650 		}
   2651 	}
   2652 
   2653 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2654 	if (pn != NULL) {
   2655 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2656 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2657 	} else {
   2658 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2659 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2660 			goto out;
   2661 		}
   2662 	}
   2663 
   2664 	/* check for WM_F_WOL */
   2665 	switch (sc->sc_type) {
   2666 	case WM_T_82542_2_0:
   2667 	case WM_T_82542_2_1:
   2668 	case WM_T_82543:
   2669 		/* dummy? */
   2670 		eeprom_data = 0;
   2671 		apme_mask = NVM_CFG3_APME;
   2672 		break;
   2673 	case WM_T_82544:
   2674 		apme_mask = NVM_CFG2_82544_APM_EN;
   2675 		eeprom_data = cfg2;
   2676 		break;
   2677 	case WM_T_82546:
   2678 	case WM_T_82546_3:
   2679 	case WM_T_82571:
   2680 	case WM_T_82572:
   2681 	case WM_T_82573:
   2682 	case WM_T_82574:
   2683 	case WM_T_82583:
   2684 	case WM_T_80003:
   2685 	case WM_T_82575:
   2686 	case WM_T_82576:
   2687 		apme_mask = NVM_CFG3_APME;
   2688 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2689 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2690 		break;
   2691 	case WM_T_82580:
   2692 	case WM_T_I350:
   2693 	case WM_T_I354:
   2694 	case WM_T_I210:
   2695 	case WM_T_I211:
   2696 		apme_mask = NVM_CFG3_APME;
   2697 		wm_nvm_read(sc,
   2698 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2699 		    1, &eeprom_data);
   2700 		break;
   2701 	case WM_T_ICH8:
   2702 	case WM_T_ICH9:
   2703 	case WM_T_ICH10:
   2704 	case WM_T_PCH:
   2705 	case WM_T_PCH2:
   2706 	case WM_T_PCH_LPT:
   2707 	case WM_T_PCH_SPT:
   2708 	case WM_T_PCH_CNP:
    2709 		/* Already checked before wm_reset() */
   2710 		apme_mask = eeprom_data = 0;
   2711 		break;
   2712 	default: /* XXX 82540 */
   2713 		apme_mask = NVM_CFG3_APME;
   2714 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2715 		break;
   2716 	}
   2717 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2718 	if ((eeprom_data & apme_mask) != 0)
   2719 		sc->sc_flags |= WM_F_WOL;
   2720 
   2721 	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake on LAN on a particular port.
   2725 	 */
   2726 	switch (sc->sc_pcidevid) {
   2727 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2728 		sc->sc_flags &= ~WM_F_WOL;
   2729 		break;
   2730 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2731 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
		/* Wake events are only supported on port A of dual fiber
		 * adapters, regardless of the EEPROM setting */
   2734 		if (sc->sc_funcid == 1)
   2735 			sc->sc_flags &= ~WM_F_WOL;
   2736 		break;
   2737 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2738 		/* If quad port adapter, disable WoL on all but port A */
   2739 		if (sc->sc_funcid != 0)
   2740 			sc->sc_flags &= ~WM_F_WOL;
   2741 		break;
   2742 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
		/* Wake events are only supported on port A of dual fiber
		 * adapters, regardless of the EEPROM setting */
   2745 		if (sc->sc_funcid == 1)
   2746 			sc->sc_flags &= ~WM_F_WOL;
   2747 		break;
   2748 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2749 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2750 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2751 		/* If quad port adapter, disable WoL on all but port A */
   2752 		if (sc->sc_funcid != 0)
   2753 			sc->sc_flags &= ~WM_F_WOL;
   2754 		break;
   2755 	}
   2756 
   2757 	if (sc->sc_type >= WM_T_82575) {
   2758 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2759 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2760 			    nvmword);
   2761 			if ((sc->sc_type == WM_T_82575) ||
   2762 			    (sc->sc_type == WM_T_82576)) {
   2763 				/* Check NVM for autonegotiation */
   2764 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2765 				    != 0)
   2766 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2767 			}
   2768 			if ((sc->sc_type == WM_T_82575) ||
   2769 			    (sc->sc_type == WM_T_I350)) {
   2770 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2771 					sc->sc_flags |= WM_F_MAS;
   2772 			}
   2773 		}
   2774 	}
   2775 
   2776 	/*
	 * XXX Some multi-port cards need special handling to disable
	 * a particular port.
   2779 	 */
   2780 
   2781 	if (sc->sc_type >= WM_T_82544) {
   2782 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2783 		if (pn != NULL) {
   2784 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2785 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2786 		} else {
   2787 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2788 				aprint_error_dev(sc->sc_dev,
   2789 				    "unable to read SWDPIN\n");
   2790 				goto out;
   2791 			}
   2792 		}
   2793 	}
   2794 
   2795 	if (cfg1 & NVM_CFG1_ILOS)
   2796 		sc->sc_ctrl |= CTRL_ILOS;
   2797 
   2798 	/*
   2799 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * at different positions on newer chips. Check all datasheets.
	 *
	 * Until this problem is resolved, only apply this to chips
	 * up to the 82580.
   2804 	 */
   2805 	if (sc->sc_type <= WM_T_82580) {
   2806 		if (sc->sc_type >= WM_T_82544) {
   2807 			sc->sc_ctrl |=
   2808 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2809 			    CTRL_SWDPIO_SHIFT;
   2810 			sc->sc_ctrl |=
   2811 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2812 			    CTRL_SWDPINS_SHIFT;
   2813 		} else {
   2814 			sc->sc_ctrl |=
   2815 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2816 			    CTRL_SWDPIO_SHIFT;
   2817 		}
   2818 	}
   2819 
   2820 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2821 		wm_nvm_read(sc,
   2822 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2823 		    1, &nvmword);
   2824 		if (nvmword & NVM_CFG3_ILOS)
   2825 			sc->sc_ctrl |= CTRL_ILOS;
   2826 	}
   2827 
   2828 #if 0
   2829 	if (sc->sc_type >= WM_T_82544) {
   2830 		if (cfg1 & NVM_CFG1_IPS0)
   2831 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2832 		if (cfg1 & NVM_CFG1_IPS1)
   2833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2834 		sc->sc_ctrl_ext |=
   2835 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2836 		    CTRL_EXT_SWDPIO_SHIFT;
   2837 		sc->sc_ctrl_ext |=
   2838 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2839 		    CTRL_EXT_SWDPINS_SHIFT;
   2840 	} else {
   2841 		sc->sc_ctrl_ext |=
   2842 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2843 		    CTRL_EXT_SWDPIO_SHIFT;
   2844 	}
   2845 #endif
   2846 
   2847 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2848 #if 0
   2849 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2850 #endif
   2851 
   2852 	if (sc->sc_type == WM_T_PCH) {
   2853 		uint16_t val;
   2854 
   2855 		/* Save the NVM K1 bit setting */
   2856 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2857 
   2858 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2859 			sc->sc_nvm_k1_enabled = 1;
   2860 		else
   2861 			sc->sc_nvm_k1_enabled = 0;
   2862 	}
   2863 
   2864 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2865 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2866 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2867 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2868 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2869 	    || sc->sc_type == WM_T_82573
   2870 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2871 		/* Copper only */
   2872 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2876 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2877 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2878 		switch (link_mode) {
   2879 		case CTRL_EXT_LINK_MODE_1000KX:
   2880 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2881 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2882 			break;
   2883 		case CTRL_EXT_LINK_MODE_SGMII:
   2884 			if (wm_sgmii_uses_mdio(sc)) {
   2885 				aprint_normal_dev(sc->sc_dev,
   2886 				    "SGMII(MDIO)\n");
   2887 				sc->sc_flags |= WM_F_SGMII;
   2888 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2889 				break;
   2890 			}
   2891 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2892 			/*FALLTHROUGH*/
   2893 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2894 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2895 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2896 				if (link_mode
   2897 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2898 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2899 					sc->sc_flags |= WM_F_SGMII;
   2900 					aprint_verbose_dev(sc->sc_dev,
   2901 					    "SGMII\n");
   2902 				} else {
   2903 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2904 					aprint_verbose_dev(sc->sc_dev,
   2905 					    "SERDES\n");
   2906 				}
   2907 				break;
   2908 			}
   2909 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2910 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2911 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2912 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2913 				sc->sc_flags |= WM_F_SGMII;
   2914 			}
   2915 			/* Do not change link mode for 100BaseFX */
   2916 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2917 				break;
   2918 
   2919 			/* Change current link mode setting */
   2920 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2921 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2922 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2923 			else
   2924 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2925 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2926 			break;
   2927 		case CTRL_EXT_LINK_MODE_GMII:
   2928 		default:
   2929 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2930 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2931 			break;
   2932 		}
   2933 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2939 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2940 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2941 			if (!wm_sgmii_uses_mdio(sc))
   2942 				wm_gmii_setup_phytype(sc, 0, 0);
   2943 			wm_reset_mdicnfg_82580(sc);
   2944 		}
   2945 	} else if (sc->sc_type < WM_T_82543 ||
   2946 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2947 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2948 			aprint_error_dev(sc->sc_dev,
   2949 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2950 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2951 		}
   2952 	} else {
   2953 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2954 			aprint_error_dev(sc->sc_dev,
   2955 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2956 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2957 		}
   2958 	}
   2959 
   2960 	if (sc->sc_type >= WM_T_PCH2)
   2961 		sc->sc_flags |= WM_F_EEE;
   2962 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2963 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2964 		/* XXX: Need special handling for I354. (not yet) */
   2965 		if (sc->sc_type != WM_T_I354)
   2966 			sc->sc_flags |= WM_F_EEE;
   2967 	}
   2968 
   2969 	/*
   2970 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   2972 	 */
   2973 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2974 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2975 		sc->sc_flags |= WM_F_CRC_STRIP;
   2976 
   2977 	/* Set device properties (macflags) */
   2978 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2979 
   2980 	if (sc->sc_flags != 0) {
   2981 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2982 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2983 	}
   2984 
   2985 #ifdef WM_MPSAFE
   2986 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2987 #else
   2988 	sc->sc_core_lock = NULL;
   2989 #endif
   2990 
   2991 	/* Initialize the media structures accordingly. */
   2992 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2993 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2994 	else
   2995 		wm_tbi_mediainit(sc); /* All others */
   2996 
   2997 	ifp = &sc->sc_ethercom.ec_if;
   2998 	xname = device_xname(sc->sc_dev);
   2999 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3000 	ifp->if_softc = sc;
   3001 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3002 #ifdef WM_MPSAFE
   3003 	ifp->if_extflags = IFEF_MPSAFE;
   3004 #endif
   3005 	ifp->if_ioctl = wm_ioctl;
   3006 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3007 		ifp->if_start = wm_nq_start;
   3008 		/*
   3009 		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
   3013 		 * In this situation, wm_nq_transmit() is disadvantageous
   3014 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3015 		 */
   3016 		if (wm_is_using_multiqueue(sc))
   3017 			ifp->if_transmit = wm_nq_transmit;
   3018 	} else {
   3019 		ifp->if_start = wm_start;
   3020 		/*
		 * wm_transmit() has the same disadvantages as wm_nq_transmit().
   3022 		 */
   3023 		if (wm_is_using_multiqueue(sc))
   3024 			ifp->if_transmit = wm_transmit;
   3025 	}
	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
   3027 	ifp->if_init = wm_init;
   3028 	ifp->if_stop = wm_stop;
   3029 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3030 	IFQ_SET_READY(&ifp->if_snd);
   3031 
   3032 	/* Check for jumbo frame */
   3033 	switch (sc->sc_type) {
   3034 	case WM_T_82573:
   3035 		/* XXX limited to 9234 if ASPM is disabled */
   3036 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3037 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3038 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3039 		break;
   3040 	case WM_T_82571:
   3041 	case WM_T_82572:
   3042 	case WM_T_82574:
   3043 	case WM_T_82583:
   3044 	case WM_T_82575:
   3045 	case WM_T_82576:
   3046 	case WM_T_82580:
   3047 	case WM_T_I350:
   3048 	case WM_T_I354:
   3049 	case WM_T_I210:
   3050 	case WM_T_I211:
   3051 	case WM_T_80003:
   3052 	case WM_T_ICH9:
   3053 	case WM_T_ICH10:
   3054 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3055 	case WM_T_PCH_LPT:
   3056 	case WM_T_PCH_SPT:
   3057 	case WM_T_PCH_CNP:
   3058 		/* XXX limited to 9234 */
   3059 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3060 		break;
   3061 	case WM_T_PCH:
   3062 		/* XXX limited to 4096 */
   3063 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3064 		break;
   3065 	case WM_T_82542_2_0:
   3066 	case WM_T_82542_2_1:
   3067 	case WM_T_ICH8:
   3068 		/* No support for jumbo frame */
   3069 		break;
   3070 	default:
   3071 		/* ETHER_MAX_LEN_JUMBO */
   3072 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3073 		break;
   3074 	}
   3075 
	/* If we're an i82543 or greater, we can support VLANs. */
   3077 	if (sc->sc_type >= WM_T_82543) {
   3078 		sc->sc_ethercom.ec_capabilities |=
   3079 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3080 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3081 	}
   3082 
   3083 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3084 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3085 
   3086 	/*
	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
	 * on the i82543 and later.
   3089 	 */
   3090 	if (sc->sc_type >= WM_T_82543) {
   3091 		ifp->if_capabilities |=
   3092 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3093 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3094 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3095 		    IFCAP_CSUM_TCPv6_Tx |
   3096 		    IFCAP_CSUM_UDPv6_Tx;
   3097 	}
   3098 
   3099 	/*
   3100 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3101 	 *
   3102 	 *	82541GI (8086:1076) ... no
   3103 	 *	82572EI (8086:10b9) ... yes
   3104 	 */
   3105 	if (sc->sc_type >= WM_T_82571) {
   3106 		ifp->if_capabilities |=
   3107 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3108 	}
   3109 
   3110 	/*
	 * If we're an i82544 or greater (except i82547), we can do
   3112 	 * TCP segmentation offload.
   3113 	 */
   3114 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3115 		ifp->if_capabilities |= IFCAP_TSOv4;
   3116 	}
   3117 
   3118 	if (sc->sc_type >= WM_T_82571) {
   3119 		ifp->if_capabilities |= IFCAP_TSOv6;
   3120 	}
   3121 
   3122 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3123 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3124 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3125 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3126 
   3127 	/* Attach the interface. */
   3128 	if_initialize(ifp);
   3129 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3130 	ether_ifattach(ifp, enaddr);
   3131 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3132 	if_register(ifp);
   3133 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3134 	    RND_FLAG_DEFAULT);
   3135 
   3136 #ifdef WM_EVENT_COUNTERS
   3137 	/* Attach event counters. */
   3138 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3139 	    NULL, xname, "linkintr");
   3140 
   3141 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3142 	    NULL, xname, "tx_xoff");
   3143 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3144 	    NULL, xname, "tx_xon");
   3145 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3146 	    NULL, xname, "rx_xoff");
   3147 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3148 	    NULL, xname, "rx_xon");
   3149 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3150 	    NULL, xname, "rx_macctl");
   3151 #endif /* WM_EVENT_COUNTERS */
   3152 
   3153 	sc->sc_txrx_use_workqueue = false;
   3154 
   3155 	if (wm_phy_need_linkdown_discard(sc))
   3156 		wm_set_linkdown_discard(sc);
   3157 
   3158 	wm_init_sysctls(sc);
   3159 
   3160 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3161 		pmf_class_network_register(self, ifp);
   3162 	else
   3163 		aprint_error_dev(self, "couldn't establish power handler\n");
   3164 
   3165 	sc->sc_flags |= WM_F_ATTACHED;
   3166 out:
   3167 	return;
   3168 }
   3169 
   3170 /* The detach function (ca_detach) */
   3171 static int
   3172 wm_detach(device_t self, int flags __unused)
   3173 {
   3174 	struct wm_softc *sc = device_private(self);
   3175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3176 	int i;
   3177 
   3178 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3179 		return 0;
   3180 
   3181 	/* Stop the interface. Callouts are stopped in it. */
   3182 	wm_stop(ifp, 1);
   3183 
   3184 	pmf_device_deregister(self);
   3185 
   3186 	sysctl_teardown(&sc->sc_sysctllog);
   3187 
   3188 #ifdef WM_EVENT_COUNTERS
   3189 	evcnt_detach(&sc->sc_ev_linkintr);
   3190 
   3191 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3192 	evcnt_detach(&sc->sc_ev_tx_xon);
   3193 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3194 	evcnt_detach(&sc->sc_ev_rx_xon);
   3195 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3196 #endif /* WM_EVENT_COUNTERS */
   3197 
   3198 	rnd_detach_source(&sc->rnd_source);
   3199 
   3200 	/* Tell the firmware about the release */
   3201 	WM_CORE_LOCK(sc);
   3202 	wm_release_manageability(sc);
   3203 	wm_release_hw_control(sc);
   3204 	wm_enable_wakeup(sc);
   3205 	WM_CORE_UNLOCK(sc);
   3206 
   3207 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3208 
   3209 	ether_ifdetach(ifp);
   3210 	if_detach(ifp);
   3211 	if_percpuq_destroy(sc->sc_ipq);
   3212 
   3213 	/* Delete all remaining media. */
   3214 	ifmedia_fini(&sc->sc_mii.mii_media);
   3215 
   3216 	/* Unload RX dmamaps and free mbufs */
   3217 	for (i = 0; i < sc->sc_nqueues; i++) {
   3218 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3219 		mutex_enter(rxq->rxq_lock);
   3220 		wm_rxdrain(rxq);
   3221 		mutex_exit(rxq->rxq_lock);
   3222 	}
   3223 	/* Must unlock here */
   3224 
   3225 	/* Disestablish the interrupt handler */
   3226 	for (i = 0; i < sc->sc_nintrs; i++) {
   3227 		if (sc->sc_ihs[i] != NULL) {
   3228 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3229 			sc->sc_ihs[i] = NULL;
   3230 		}
   3231 	}
   3232 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3233 
	/* wm_stop() ensures the workqueue is stopped. */
   3235 	workqueue_destroy(sc->sc_queue_wq);
   3236 
   3237 	for (i = 0; i < sc->sc_nqueues; i++)
   3238 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3239 
   3240 	wm_free_txrx_queues(sc);
   3241 
   3242 	/* Unmap the registers */
   3243 	if (sc->sc_ss) {
   3244 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3245 		sc->sc_ss = 0;
   3246 	}
   3247 	if (sc->sc_ios) {
   3248 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3249 		sc->sc_ios = 0;
   3250 	}
   3251 	if (sc->sc_flashs) {
   3252 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3253 		sc->sc_flashs = 0;
   3254 	}
   3255 
   3256 	if (sc->sc_core_lock)
   3257 		mutex_obj_free(sc->sc_core_lock);
   3258 	if (sc->sc_ich_phymtx)
   3259 		mutex_obj_free(sc->sc_ich_phymtx);
   3260 	if (sc->sc_ich_nvmmtx)
   3261 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3262 
   3263 	return 0;
   3264 }
   3265 
   3266 static bool
   3267 wm_suspend(device_t self, const pmf_qual_t *qual)
   3268 {
   3269 	struct wm_softc *sc = device_private(self);
   3270 
   3271 	wm_release_manageability(sc);
   3272 	wm_release_hw_control(sc);
   3273 	wm_enable_wakeup(sc);
   3274 
   3275 	return true;
   3276 }
   3277 
   3278 static bool
   3279 wm_resume(device_t self, const pmf_qual_t *qual)
   3280 {
   3281 	struct wm_softc *sc = device_private(self);
   3282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3283 	pcireg_t reg;
   3284 	char buf[256];
   3285 
   3286 	reg = CSR_READ(sc, WMREG_WUS);
   3287 	if (reg != 0) {
   3288 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3289 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3290 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3291 	}
   3292 
   3293 	if (sc->sc_type >= WM_T_PCH2)
   3294 		wm_resume_workarounds_pchlan(sc);
   3295 	if ((ifp->if_flags & IFF_UP) == 0) {
   3296 		wm_reset(sc);
   3297 		/* Non-AMT based hardware can now take control from firmware */
   3298 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3299 			wm_get_hw_control(sc);
   3300 		wm_init_manageability(sc);
   3301 	} else {
   3302 		/*
   3303 		 * We called pmf_class_network_register(), so if_init() is
   3304 		 * automatically called when IFF_UP. wm_reset(),
   3305 		 * wm_get_hw_control() and wm_init_manageability() are called
   3306 		 * via wm_init().
   3307 		 */
   3308 	}
   3309 
   3310 	return true;
   3311 }
   3312 
   3313 /*
   3314  * wm_watchdog:		[ifnet interface function]
   3315  *
   3316  *	Watchdog timer handler.
   3317  */
   3318 static void
   3319 wm_watchdog(struct ifnet *ifp)
   3320 {
   3321 	int qid;
   3322 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* The max number of queues is the 82576's 16. */
   3324 
   3325 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3326 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3327 
   3328 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3329 	}
   3330 
	/* If any of the queues hung up, reset the interface. */
   3332 	if (hang_queue != 0) {
   3333 		(void)wm_init(ifp);
   3334 
   3335 		/*
		 * There is still some upper-layer processing that calls
		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3338 		 */
   3339 		/* Try to get more packets going. */
   3340 		ifp->if_start(ifp);
   3341 	}
   3342 }
   3343 
   3344 
   3345 static void
   3346 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3347 {
   3348 
   3349 	mutex_enter(txq->txq_lock);
   3350 	if (txq->txq_sending &&
   3351 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3352 		wm_watchdog_txq_locked(ifp, txq, hang);
   3353 
   3354 	mutex_exit(txq->txq_lock);
   3355 }
   3356 
   3357 static void
   3358 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3359     uint16_t *hang)
   3360 {
   3361 	struct wm_softc *sc = ifp->if_softc;
   3362 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3363 
   3364 	KASSERT(mutex_owned(txq->txq_lock));
   3365 
   3366 	/*
   3367 	 * Since we're using delayed interrupts, sweep up
   3368 	 * before we report an error.
   3369 	 */
   3370 	wm_txeof(txq, UINT_MAX);
   3371 
   3372 	if (txq->txq_sending)
   3373 		*hang |= __BIT(wmq->wmq_id);
   3374 
   3375 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3376 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3377 		    device_xname(sc->sc_dev));
   3378 	} else {
   3379 #ifdef WM_DEBUG
   3380 		int i, j;
   3381 		struct wm_txsoft *txs;
   3382 #endif
   3383 		log(LOG_ERR,
   3384 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3385 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3386 		    txq->txq_next);
   3387 		if_statinc(ifp, if_oerrors);
   3388 #ifdef WM_DEBUG
   3389 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3390 		    i = WM_NEXTTXS(txq, i)) {
   3391 			txs = &txq->txq_soft[i];
   3392 			printf("txs %d tx %d -> %d\n",
   3393 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3394 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3395 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3396 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3397 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3398 					printf("\t %#08x%08x\n",
   3399 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3400 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3401 				} else {
   3402 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3403 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3404 					    txq->txq_descs[j].wtx_addr.wa_low);
   3405 					printf("\t %#04x%02x%02x%08x\n",
   3406 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3407 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3408 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3409 					    txq->txq_descs[j].wtx_cmdlen);
   3410 				}
   3411 				if (j == txs->txs_lastdesc)
   3412 					break;
   3413 			}
   3414 		}
   3415 #endif
   3416 	}
   3417 }
   3418 
   3419 /*
   3420  * wm_tick:
   3421  *
   3422  *	One second timer, used to check link status, sweep up
   3423  *	completed transmit jobs, etc.
   3424  */
   3425 static void
   3426 wm_tick(void *arg)
   3427 {
   3428 	struct wm_softc *sc = arg;
   3429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3430 #ifndef WM_MPSAFE
   3431 	int s = splnet();
   3432 #endif
   3433 
   3434 	WM_CORE_LOCK(sc);
   3435 
   3436 	if (sc->sc_core_stopping) {
   3437 		WM_CORE_UNLOCK(sc);
   3438 #ifndef WM_MPSAFE
   3439 		splx(s);
   3440 #endif
   3441 		return;
   3442 	}
   3443 
   3444 	if (sc->sc_type >= WM_T_82542_2_1) {
   3445 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3446 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3447 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3448 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3449 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3450 	}
   3451 
   3452 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3453 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3454 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3455 	    + CSR_READ(sc, WMREG_CRCERRS)
   3456 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3457 	    + CSR_READ(sc, WMREG_SYMERRC)
   3458 	    + CSR_READ(sc, WMREG_RXERRC)
   3459 	    + CSR_READ(sc, WMREG_SEC)
   3460 	    + CSR_READ(sc, WMREG_CEXTERR)
   3461 	    + CSR_READ(sc, WMREG_RLEC));
   3462 	/*
	 * WMREG_RNBC is incremented when there are no available buffers in
	 * host memory. It does not count dropped packets, because the
	 * Ethernet controller can still receive packets in that case as
	 * long as there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
	 * if_iqdrops.
   3470 	 */
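	/*
	 * A minimal sketch of such a private counter (sc_ev_rnbc is a
	 * hypothetical softc member, not part of this driver): attach it
	 * once alongside the other event counters, e.g.
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, xname, "rx_no_buffers");
	 * and then accumulate the register here instead of if_iqdrops:
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */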
   3471 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3472 	IF_STAT_PUTREF(ifp);
   3473 
   3474 	if (sc->sc_flags & WM_F_HAS_MII)
   3475 		mii_tick(&sc->sc_mii);
   3476 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3477 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3478 		wm_serdes_tick(sc);
   3479 	else
   3480 		wm_tbi_tick(sc);
   3481 
   3482 	WM_CORE_UNLOCK(sc);
   3483 
   3484 	wm_watchdog(ifp);
   3485 
   3486 	callout_schedule(&sc->sc_tick_ch, hz);
   3487 }
   3488 
   3489 static int
   3490 wm_ifflags_cb(struct ethercom *ec)
   3491 {
   3492 	struct ifnet *ifp = &ec->ec_if;
   3493 	struct wm_softc *sc = ifp->if_softc;
   3494 	u_short iffchange;
   3495 	int ecchange;
   3496 	bool needreset = false;
   3497 	int rc = 0;
   3498 
   3499 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3500 		device_xname(sc->sc_dev), __func__));
   3501 
   3502 	WM_CORE_LOCK(sc);
   3503 
   3504 	/*
	 * Check for changes in if_flags.
	 * The main purpose is to prevent a link-down when opening bpf,
	 * since toggling IFF_PROMISC alone does not need a full reset.
   3507 	 */
   3508 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3509 	sc->sc_if_flags = ifp->if_flags;
   3510 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3511 		needreset = true;
   3512 		goto ec;
   3513 	}
   3514 
   3515 	/* iff related updates */
   3516 	if ((iffchange & IFF_PROMISC) != 0)
   3517 		wm_set_filter(sc);
   3518 
   3519 	wm_set_vlan(sc);
   3520 
   3521 ec:
   3522 	/* Check for ec_capenable. */
   3523 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3524 	sc->sc_ec_capenable = ec->ec_capenable;
   3525 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3526 		needreset = true;
   3527 		goto out;
   3528 	}
   3529 
   3530 	/* ec related updates */
   3531 	wm_set_eee(sc);
   3532 
   3533 out:
   3534 	if (needreset)
   3535 		rc = ENETRESET;
   3536 	WM_CORE_UNLOCK(sc);
   3537 
   3538 	return rc;
   3539 }
   3540 
   3541 static bool
   3542 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3543 {
   3544 
   3545 	switch (sc->sc_phytype) {
   3546 	case WMPHY_82577: /* ihphy */
   3547 	case WMPHY_82578: /* atphy */
   3548 	case WMPHY_82579: /* ihphy */
   3549 	case WMPHY_I217: /* ihphy */
   3550 	case WMPHY_82580: /* ihphy */
   3551 	case WMPHY_I350: /* ihphy */
   3552 		return true;
   3553 	default:
   3554 		return false;
   3555 	}
   3556 }
   3557 
   3558 static void
   3559 wm_set_linkdown_discard(struct wm_softc *sc)
   3560 {
   3561 
   3562 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3563 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3564 
   3565 		mutex_enter(txq->txq_lock);
   3566 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3567 		mutex_exit(txq->txq_lock);
   3568 	}
   3569 }
   3570 
   3571 static void
   3572 wm_clear_linkdown_discard(struct wm_softc *sc)
   3573 {
   3574 
   3575 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3576 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3577 
   3578 		mutex_enter(txq->txq_lock);
   3579 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3580 		mutex_exit(txq->txq_lock);
   3581 	}
   3582 }
   3583 
   3584 /*
   3585  * wm_ioctl:		[ifnet interface function]
   3586  *
   3587  *	Handle control requests from the operator.
   3588  */
   3589 static int
   3590 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3591 {
   3592 	struct wm_softc *sc = ifp->if_softc;
   3593 	struct ifreq *ifr = (struct ifreq *)data;
   3594 	struct ifaddr *ifa = (struct ifaddr *)data;
   3595 	struct sockaddr_dl *sdl;
   3596 	int s, error;
   3597 
   3598 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3599 		device_xname(sc->sc_dev), __func__));
   3600 
   3601 #ifndef WM_MPSAFE
   3602 	s = splnet();
   3603 #endif
   3604 	switch (cmd) {
   3605 	case SIOCSIFMEDIA:
   3606 		WM_CORE_LOCK(sc);
   3607 		/* Flow control requires full-duplex mode. */
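		/*
		 * For example (assuming the standard ifconfig(8) media
		 * syntax), flow control would be requested with something
		 * like:
		 *	ifconfig wm0 media 1000baseT \
		 *	    mediaopt full-duplex,flowcontrol
		 */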
   3608 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3609 		    (ifr->ifr_media & IFM_FDX) == 0)
   3610 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3611 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3612 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3613 				/* We can do both TXPAUSE and RXPAUSE. */
   3614 				ifr->ifr_media |=
   3615 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3616 			}
   3617 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3618 		}
   3619 		WM_CORE_UNLOCK(sc);
   3620 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3621 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3622 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3623 				wm_set_linkdown_discard(sc);
   3624 			else
   3625 				wm_clear_linkdown_discard(sc);
   3626 		}
   3627 		break;
   3628 	case SIOCINITIFADDR:
   3629 		WM_CORE_LOCK(sc);
   3630 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3631 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3632 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3633 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3634 			/* Unicast address is the first multicast entry */
   3635 			wm_set_filter(sc);
   3636 			error = 0;
   3637 			WM_CORE_UNLOCK(sc);
   3638 			break;
   3639 		}
   3640 		WM_CORE_UNLOCK(sc);
		if (((ifp->if_flags & IFF_UP) == 0) &&
		    wm_phy_need_linkdown_discard(sc))
			wm_clear_linkdown_discard(sc);
   3643 		/*FALLTHROUGH*/
   3644 	default:
   3645 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
			if (((ifp->if_flags & IFF_UP) == 0) &&
			    ((ifr->ifr_flags & IFF_UP) != 0)) {
				wm_clear_linkdown_discard(sc);
			} else if (((ifp->if_flags & IFF_UP) != 0) &&
			    ((ifr->ifr_flags & IFF_UP) == 0)) {
				wm_set_linkdown_discard(sc);
			}
   3651 		}
   3652 #ifdef WM_MPSAFE
   3653 		s = splnet();
   3654 #endif
   3655 		/* It may call wm_start, so unlock here */
   3656 		error = ether_ioctl(ifp, cmd, data);
   3657 #ifdef WM_MPSAFE
   3658 		splx(s);
   3659 #endif
   3660 		if (error != ENETRESET)
   3661 			break;
   3662 
   3663 		error = 0;
   3664 
   3665 		if (cmd == SIOCSIFCAP)
   3666 			error = (*ifp->if_init)(ifp);
   3667 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3668 			;
   3669 		else if (ifp->if_flags & IFF_RUNNING) {
   3670 			/*
   3671 			 * Multicast list has changed; set the hardware filter
   3672 			 * accordingly.
   3673 			 */
   3674 			WM_CORE_LOCK(sc);
   3675 			wm_set_filter(sc);
   3676 			WM_CORE_UNLOCK(sc);
   3677 		}
   3678 		break;
   3679 	}
   3680 
   3681 #ifndef WM_MPSAFE
   3682 	splx(s);
   3683 #endif
   3684 	return error;
   3685 }
   3686 
   3687 /* MAC address related */
   3688 
   3689 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, offset 0 is used.
   3692  */
   3693 static uint16_t
   3694 wm_check_alt_mac_addr(struct wm_softc *sc)
   3695 {
   3696 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3697 	uint16_t offset = NVM_OFF_MACADDR;
   3698 
   3699 	/* Try to read alternative MAC address pointer */
   3700 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3701 		return 0;
   3702 
	/* Check whether the pointer is valid. */
   3704 	if ((offset == 0x0000) || (offset == 0xffff))
   3705 		return 0;
   3706 
   3707 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3708 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * Check whether the broadcast bit is set; a valid unicast
	 * address must have the LSB of its first octet clear.
   3714 	 */
   3715 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3716 		if (((myea[0] & 0xff) & 0x01) == 0)
   3717 			return offset; /* Found */
   3718 
   3719 	/* Not found */
   3720 	return 0;
   3721 }
   3722 
   3723 static int
   3724 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3725 {
   3726 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3727 	uint16_t offset = NVM_OFF_MACADDR;
   3728 	int do_invert = 0;
   3729 
   3730 	switch (sc->sc_type) {
   3731 	case WM_T_82580:
   3732 	case WM_T_I350:
   3733 	case WM_T_I354:
   3734 		/* EEPROM Top Level Partitioning */
   3735 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3736 		break;
   3737 	case WM_T_82571:
   3738 	case WM_T_82575:
   3739 	case WM_T_82576:
   3740 	case WM_T_80003:
   3741 	case WM_T_I210:
   3742 	case WM_T_I211:
   3743 		offset = wm_check_alt_mac_addr(sc);
   3744 		if (offset == 0)
   3745 			if ((sc->sc_funcid & 0x01) == 1)
   3746 				do_invert = 1;
   3747 		break;
   3748 	default:
   3749 		if ((sc->sc_funcid & 0x01) == 1)
   3750 			do_invert = 1;
   3751 		break;
   3752 	}
   3753 
   3754 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3755 		goto bad;
   3756 
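	/*
	 * The NVM stores the address as three little-endian 16-bit words;
	 * e.g. myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to
	 * 00:11:22:33:44:55.
	 */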
   3757 	enaddr[0] = myea[0] & 0xff;
   3758 	enaddr[1] = myea[0] >> 8;
   3759 	enaddr[2] = myea[1] & 0xff;
   3760 	enaddr[3] = myea[1] >> 8;
   3761 	enaddr[4] = myea[2] & 0xff;
   3762 	enaddr[5] = myea[2] >> 8;
   3763 
   3764 	/*
   3765 	 * Toggle the LSB of the MAC address on the second port
   3766 	 * of some dual port cards.
   3767 	 */
   3768 	if (do_invert != 0)
   3769 		enaddr[5] ^= 1;
   3770 
   3771 	return 0;
   3772 
   3773  bad:
   3774 	return -1;
   3775 }
   3776 
   3777 /*
   3778  * wm_set_ral:
   3779  *
 *	Set an entry in the receive address list.
   3781  */
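/*
 * For example, 00:11:22:33:44:55 is packed below as ral_lo = 0x33221100
 * and ral_hi = 0x00005544 | RAL_AV (the Address Valid bit); the bytes
 * are laid out little-endian across the two registers.
 */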
   3782 static void
   3783 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3784 {
   3785 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3786 	uint32_t wlock_mac;
   3787 	int rv;
   3788 
   3789 	if (enaddr != NULL) {
   3790 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3791 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3792 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3793 		ral_hi |= RAL_AV;
   3794 	} else {
   3795 		ral_lo = 0;
   3796 		ral_hi = 0;
   3797 	}
   3798 
   3799 	switch (sc->sc_type) {
   3800 	case WM_T_82542_2_0:
   3801 	case WM_T_82542_2_1:
   3802 	case WM_T_82543:
   3803 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3804 		CSR_WRITE_FLUSH(sc);
   3805 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3806 		CSR_WRITE_FLUSH(sc);
   3807 		break;
   3808 	case WM_T_PCH2:
   3809 	case WM_T_PCH_LPT:
   3810 	case WM_T_PCH_SPT:
   3811 	case WM_T_PCH_CNP:
   3812 		if (idx == 0) {
   3813 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3814 			CSR_WRITE_FLUSH(sc);
   3815 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3816 			CSR_WRITE_FLUSH(sc);
   3817 			return;
   3818 		}
   3819 		if (sc->sc_type != WM_T_PCH2) {
   3820 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3821 			    FWSM_WLOCK_MAC);
   3822 			addrl = WMREG_SHRAL(idx - 1);
   3823 			addrh = WMREG_SHRAH(idx - 1);
   3824 		} else {
   3825 			wlock_mac = 0;
   3826 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3827 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3828 		}
   3829 
   3830 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3831 			rv = wm_get_swflag_ich8lan(sc);
   3832 			if (rv != 0)
   3833 				return;
   3834 			CSR_WRITE(sc, addrl, ral_lo);
   3835 			CSR_WRITE_FLUSH(sc);
   3836 			CSR_WRITE(sc, addrh, ral_hi);
   3837 			CSR_WRITE_FLUSH(sc);
   3838 			wm_put_swflag_ich8lan(sc);
   3839 		}
   3840 
   3841 		break;
   3842 	default:
   3843 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3844 		CSR_WRITE_FLUSH(sc);
   3845 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3846 		CSR_WRITE_FLUSH(sc);
   3847 		break;
   3848 	}
   3849 }
   3850 
   3851 /*
   3852  * wm_mchash:
   3853  *
   3854  *	Compute the hash of the multicast address for the 4096-bit
   3855  *	multicast filter.
   3856  */
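/*
 * A worked example (assuming sc_mchash_type == 0, i.e. lo_shift 4 and
 * hi_shift 4, on a non-ICH/PCH chip): for the all-hosts group
 * 01:00:5e:00:00:01, hash = (0x00 >> 4) | (0x01 << 4) = 0x010, so
 * wm_set_filter() sets bit 16 (0x010 & 0x1f) of MTA register 0
 * (0x010 >> 5).
 */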
   3857 static uint32_t
   3858 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3859 {
   3860 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3861 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3862 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3863 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3864 	uint32_t hash;
   3865 
   3866 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3867 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3868 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3870 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3871 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3872 		return (hash & 0x3ff);
   3873 	}
   3874 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3875 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3876 
   3877 	return (hash & 0xfff);
   3878 }
   3879 
/*
 * wm_rar_count:
 *
 *	Return the number of usable entries in the receive address list.
 */
   3884 static int
   3885 wm_rar_count(struct wm_softc *sc)
   3886 {
   3887 	int size;
   3888 
   3889 	switch (sc->sc_type) {
   3890 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3892 		break;
   3893 	case WM_T_ICH9:
   3894 	case WM_T_ICH10:
   3895 	case WM_T_PCH:
   3896 		size = WM_RAL_TABSIZE_ICH8;
   3897 		break;
   3898 	case WM_T_PCH2:
   3899 		size = WM_RAL_TABSIZE_PCH2;
   3900 		break;
   3901 	case WM_T_PCH_LPT:
   3902 	case WM_T_PCH_SPT:
   3903 	case WM_T_PCH_CNP:
   3904 		size = WM_RAL_TABSIZE_PCH_LPT;
   3905 		break;
   3906 	case WM_T_82575:
   3907 	case WM_T_I210:
   3908 	case WM_T_I211:
   3909 		size = WM_RAL_TABSIZE_82575;
   3910 		break;
   3911 	case WM_T_82576:
   3912 	case WM_T_82580:
   3913 		size = WM_RAL_TABSIZE_82576;
   3914 		break;
   3915 	case WM_T_I350:
   3916 	case WM_T_I354:
   3917 		size = WM_RAL_TABSIZE_I350;
   3918 		break;
   3919 	default:
   3920 		size = WM_RAL_TABSIZE;
   3921 	}
   3922 
   3923 	return size;
   3924 }
   3925 
   3926 /*
   3927  * wm_set_filter:
   3928  *
   3929  *	Set up the receive filter.
   3930  */
   3931 static void
   3932 wm_set_filter(struct wm_softc *sc)
   3933 {
   3934 	struct ethercom *ec = &sc->sc_ethercom;
   3935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3936 	struct ether_multi *enm;
   3937 	struct ether_multistep step;
   3938 	bus_addr_t mta_reg;
   3939 	uint32_t hash, reg, bit;
   3940 	int i, size, ralmax, rv;
   3941 
   3942 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 
   3945 	if (sc->sc_type >= WM_T_82544)
   3946 		mta_reg = WMREG_CORDOVA_MTA;
   3947 	else
   3948 		mta_reg = WMREG_MTA;
   3949 
   3950 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3951 
   3952 	if (ifp->if_flags & IFF_BROADCAST)
   3953 		sc->sc_rctl |= RCTL_BAM;
   3954 	if (ifp->if_flags & IFF_PROMISC) {
   3955 		sc->sc_rctl |= RCTL_UPE;
   3956 		ETHER_LOCK(ec);
   3957 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3958 		ETHER_UNLOCK(ec);
   3959 		goto allmulti;
   3960 	}
   3961 
   3962 	/*
   3963 	 * Set the station address in the first RAL slot, and
   3964 	 * clear the remaining slots.
   3965 	 */
   3966 	size = wm_rar_count(sc);
   3967 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3968 
   3969 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3970 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3971 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3972 		switch (i) {
   3973 		case 0:
   3974 			/* We can use all entries */
   3975 			ralmax = size;
   3976 			break;
   3977 		case 1:
   3978 			/* Only RAR[0] */
   3979 			ralmax = 1;
   3980 			break;
   3981 		default:
   3982 			/* Available SHRA + RAR[0] */
   3983 			ralmax = i + 1;
   3984 		}
   3985 	} else
   3986 		ralmax = size;
   3987 	for (i = 1; i < size; i++) {
   3988 		if (i < ralmax)
   3989 			wm_set_ral(sc, NULL, i);
   3990 	}
   3991 
   3992 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3993 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3994 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3995 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3996 		size = WM_ICH8_MC_TABSIZE;
   3997 	else
   3998 		size = WM_MC_TABSIZE;
   3999 	/* Clear out the multicast table. */
   4000 	for (i = 0; i < size; i++) {
   4001 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4002 		CSR_WRITE_FLUSH(sc);
   4003 	}
   4004 
   4005 	ETHER_LOCK(ec);
   4006 	ETHER_FIRST_MULTI(step, ec, enm);
   4007 	while (enm != NULL) {
   4008 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4009 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4010 			ETHER_UNLOCK(ec);
   4011 			/*
   4012 			 * We must listen to a range of multicast addresses.
   4013 			 * For now, just accept all multicasts, rather than
   4014 			 * trying to set only those filter bits needed to match
   4015 			 * the range.  (At this time, the only use of address
   4016 			 * ranges is for IP multicast routing, for which the
   4017 			 * range is big enough to require all bits set.)
   4018 			 */
   4019 			goto allmulti;
   4020 		}
   4021 
   4022 		hash = wm_mchash(sc, enm->enm_addrlo);
   4023 
   4024 		reg = (hash >> 5);
   4025 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4026 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4027 		    || (sc->sc_type == WM_T_PCH2)
   4028 		    || (sc->sc_type == WM_T_PCH_LPT)
   4029 		    || (sc->sc_type == WM_T_PCH_SPT)
   4030 		    || (sc->sc_type == WM_T_PCH_CNP))
   4031 			reg &= 0x1f;
   4032 		else
   4033 			reg &= 0x7f;
   4034 		bit = hash & 0x1f;
   4035 
   4036 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4037 		hash |= 1U << bit;
   4038 
   4039 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4040 			/*
   4041 			 * 82544 Errata 9: Certain register cannot be written
   4042 			 * with particular alignments in PCI-X bus operation
   4043 			 * (FCAH, MTA and VFTA).
   4044 			 */
   4045 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4046 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4047 			CSR_WRITE_FLUSH(sc);
   4048 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4049 			CSR_WRITE_FLUSH(sc);
   4050 		} else {
   4051 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4052 			CSR_WRITE_FLUSH(sc);
   4053 		}
   4054 
   4055 		ETHER_NEXT_MULTI(step, enm);
   4056 	}
   4057 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4058 	ETHER_UNLOCK(ec);
   4059 
   4060 	goto setit;
   4061 
   4062  allmulti:
   4063 	sc->sc_rctl |= RCTL_MPE;
   4064 
   4065  setit:
   4066 	if (sc->sc_type >= WM_T_PCH2) {
   4067 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4068 		    && (ifp->if_mtu > ETHERMTU))
   4069 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4070 		else
   4071 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4072 		if (rv != 0)
   4073 			device_printf(sc->sc_dev,
   4074 			    "Failed to do workaround for jumbo frame.\n");
   4075 	}
   4076 
   4077 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4078 }
   4079 
   4080 /* Reset and init related */
   4081 
   4082 static void
   4083 wm_set_vlan(struct wm_softc *sc)
   4084 {
   4085 
   4086 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4087 		device_xname(sc->sc_dev), __func__));
   4088 
   4089 	/* Deal with VLAN enables. */
   4090 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4091 		sc->sc_ctrl |= CTRL_VME;
   4092 	else
   4093 		sc->sc_ctrl &= ~CTRL_VME;
   4094 
   4095 	/* Write the control registers. */
   4096 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4097 }
   4098 
   4099 static void
   4100 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4101 {
   4102 	uint32_t gcr;
   4103 	pcireg_t ctrl2;
   4104 
   4105 	gcr = CSR_READ(sc, WMREG_GCR);
   4106 
   4107 	/* Only take action if timeout value is defaulted to 0 */
   4108 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4109 		goto out;
   4110 
   4111 	if ((gcr & GCR_CAP_VER2) == 0) {
   4112 		gcr |= GCR_CMPL_TMOUT_10MS;
   4113 		goto out;
   4114 	}
   4115 
   4116 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4117 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4118 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4119 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4120 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4121 
   4122 out:
   4123 	/* Disable completion timeout resend */
   4124 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4125 
   4126 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4127 }
   4128 
   4129 void
   4130 wm_get_auto_rd_done(struct wm_softc *sc)
   4131 {
   4132 	int i;
   4133 
	/* Wait for eeprom to reload */
   4135 	switch (sc->sc_type) {
   4136 	case WM_T_82571:
   4137 	case WM_T_82572:
   4138 	case WM_T_82573:
   4139 	case WM_T_82574:
   4140 	case WM_T_82583:
   4141 	case WM_T_82575:
   4142 	case WM_T_82576:
   4143 	case WM_T_82580:
   4144 	case WM_T_I350:
   4145 	case WM_T_I354:
   4146 	case WM_T_I210:
   4147 	case WM_T_I211:
   4148 	case WM_T_80003:
   4149 	case WM_T_ICH8:
   4150 	case WM_T_ICH9:
   4151 		for (i = 0; i < 10; i++) {
   4152 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4153 				break;
   4154 			delay(1000);
   4155 		}
   4156 		if (i == 10) {
   4157 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4158 			    "complete\n", device_xname(sc->sc_dev));
   4159 		}
   4160 		break;
   4161 	default:
   4162 		break;
   4163 	}
   4164 }
   4165 
   4166 void
   4167 wm_lan_init_done(struct wm_softc *sc)
   4168 {
   4169 	uint32_t reg = 0;
   4170 	int i;
   4171 
   4172 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4173 		device_xname(sc->sc_dev), __func__));
   4174 
   4175 	/* Wait for eeprom to reload */
   4176 	switch (sc->sc_type) {
   4177 	case WM_T_ICH10:
   4178 	case WM_T_PCH:
   4179 	case WM_T_PCH2:
   4180 	case WM_T_PCH_LPT:
   4181 	case WM_T_PCH_SPT:
   4182 	case WM_T_PCH_CNP:
   4183 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4184 			reg = CSR_READ(sc, WMREG_STATUS);
   4185 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4186 				break;
   4187 			delay(100);
   4188 		}
   4189 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4190 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4191 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4192 		}
   4193 		break;
   4194 	default:
   4195 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4196 		    __func__);
   4197 		break;
   4198 	}
   4199 
   4200 	reg &= ~STATUS_LAN_INIT_DONE;
   4201 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4202 }
   4203 
   4204 void
   4205 wm_get_cfg_done(struct wm_softc *sc)
   4206 {
   4207 	int mask;
   4208 	uint32_t reg;
   4209 	int i;
   4210 
   4211 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4212 		device_xname(sc->sc_dev), __func__));
   4213 
   4214 	/* Wait for eeprom to reload */
   4215 	switch (sc->sc_type) {
   4216 	case WM_T_82542_2_0:
   4217 	case WM_T_82542_2_1:
   4218 		/* null */
   4219 		break;
   4220 	case WM_T_82543:
   4221 	case WM_T_82544:
   4222 	case WM_T_82540:
   4223 	case WM_T_82545:
   4224 	case WM_T_82545_3:
   4225 	case WM_T_82546:
   4226 	case WM_T_82546_3:
   4227 	case WM_T_82541:
   4228 	case WM_T_82541_2:
   4229 	case WM_T_82547:
   4230 	case WM_T_82547_2:
   4231 	case WM_T_82573:
   4232 	case WM_T_82574:
   4233 	case WM_T_82583:
   4234 		/* generic */
   4235 		delay(10*1000);
   4236 		break;
   4237 	case WM_T_80003:
   4238 	case WM_T_82571:
   4239 	case WM_T_82572:
   4240 	case WM_T_82575:
   4241 	case WM_T_82576:
   4242 	case WM_T_82580:
   4243 	case WM_T_I350:
   4244 	case WM_T_I354:
   4245 	case WM_T_I210:
   4246 	case WM_T_I211:
   4247 		if (sc->sc_type == WM_T_82571) {
			/* On the 82571, all functions share the port 0 bit */
   4249 			mask = EEMNGCTL_CFGDONE_0;
   4250 		} else
   4251 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4252 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4253 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4254 				break;
   4255 			delay(1000);
   4256 		}
   4257 		if (i >= WM_PHY_CFG_TIMEOUT)
   4258 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4259 				device_xname(sc->sc_dev), __func__));
   4260 		break;
   4261 	case WM_T_ICH8:
   4262 	case WM_T_ICH9:
   4263 	case WM_T_ICH10:
   4264 	case WM_T_PCH:
   4265 	case WM_T_PCH2:
   4266 	case WM_T_PCH_LPT:
   4267 	case WM_T_PCH_SPT:
   4268 	case WM_T_PCH_CNP:
   4269 		delay(10*1000);
   4270 		if (sc->sc_type >= WM_T_ICH10)
   4271 			wm_lan_init_done(sc);
   4272 		else
   4273 			wm_get_auto_rd_done(sc);
   4274 
   4275 		/* Clear PHY Reset Asserted bit */
   4276 		reg = CSR_READ(sc, WMREG_STATUS);
   4277 		if ((reg & STATUS_PHYRA) != 0)
   4278 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4279 		break;
   4280 	default:
   4281 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4282 		    __func__);
   4283 		break;
   4284 	}
   4285 }
   4286 
   4287 int
   4288 wm_phy_post_reset(struct wm_softc *sc)
   4289 {
   4290 	device_t dev = sc->sc_dev;
   4291 	uint16_t reg;
   4292 	int rv = 0;
   4293 
   4294 	/* This function is only for ICH8 and newer. */
   4295 	if (sc->sc_type < WM_T_ICH8)
   4296 		return 0;
   4297 
   4298 	if (wm_phy_resetisblocked(sc)) {
   4299 		/* XXX */
   4300 		device_printf(dev, "PHY is blocked\n");
   4301 		return -1;
   4302 	}
   4303 
   4304 	/* Allow time for h/w to get to quiescent state after reset */
   4305 	delay(10*1000);
   4306 
   4307 	/* Perform any necessary post-reset workarounds */
   4308 	if (sc->sc_type == WM_T_PCH)
   4309 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4310 	else if (sc->sc_type == WM_T_PCH2)
   4311 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4312 	if (rv != 0)
   4313 		return rv;
   4314 
   4315 	/* Clear the host wakeup bit after lcd reset */
   4316 	if (sc->sc_type >= WM_T_PCH) {
   4317 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4318 		reg &= ~BM_WUC_HOST_WU_BIT;
   4319 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4320 	}
   4321 
   4322 	/* Configure the LCD with the extended configuration region in NVM */
   4323 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4324 		return rv;
   4325 
   4326 	/* Configure the LCD with the OEM bits in NVM */
   4327 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4328 
   4329 	if (sc->sc_type == WM_T_PCH2) {
   4330 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4331 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4332 			delay(10 * 1000);
   4333 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4334 		}
   4335 		/* Set EEE LPI Update Timer to 200usec */
   4336 		rv = sc->phy.acquire(sc);
   4337 		if (rv)
   4338 			return rv;
   4339 		rv = wm_write_emi_reg_locked(dev,
   4340 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4341 		sc->phy.release(sc);
   4342 	}
   4343 
   4344 	return rv;
   4345 }
   4346 
   4347 /* Only for PCH and newer */
   4348 static int
   4349 wm_write_smbus_addr(struct wm_softc *sc)
   4350 {
   4351 	uint32_t strap, freq;
   4352 	uint16_t phy_data;
   4353 	int rv;
   4354 
   4355 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4356 		device_xname(sc->sc_dev), __func__));
   4357 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4358 
   4359 	strap = CSR_READ(sc, WMREG_STRAP);
   4360 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4361 
   4362 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4363 	if (rv != 0)
   4364 		return -1;
   4365 
   4366 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4367 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4368 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4369 
   4370 	if (sc->sc_phytype == WMPHY_I217) {
   4371 		/* Restore SMBus frequency */
		if (freq--) {
   4373 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4374 			    | HV_SMB_ADDR_FREQ_HIGH);
   4375 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4376 			    HV_SMB_ADDR_FREQ_LOW);
   4377 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4378 			    HV_SMB_ADDR_FREQ_HIGH);
   4379 		} else
   4380 			DPRINTF(sc, WM_DEBUG_INIT,
   4381 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4382 				device_xname(sc->sc_dev), __func__));
   4383 	}
   4384 
   4385 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4386 	    phy_data);
   4387 }
   4388 
   4389 static int
   4390 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4391 {
   4392 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4393 	uint16_t phy_page = 0;
   4394 	int rv = 0;
   4395 
   4396 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4397 		device_xname(sc->sc_dev), __func__));
   4398 
   4399 	switch (sc->sc_type) {
   4400 	case WM_T_ICH8:
   4401 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4402 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4403 			return 0;
   4404 
   4405 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4406 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4407 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4408 			break;
   4409 		}
   4410 		/* FALLTHROUGH */
   4411 	case WM_T_PCH:
   4412 	case WM_T_PCH2:
   4413 	case WM_T_PCH_LPT:
   4414 	case WM_T_PCH_SPT:
   4415 	case WM_T_PCH_CNP:
   4416 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4417 		break;
   4418 	default:
   4419 		return 0;
   4420 	}
   4421 
   4422 	if ((rv = sc->phy.acquire(sc)) != 0)
   4423 		return rv;
   4424 
   4425 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4426 	if ((reg & sw_cfg_mask) == 0)
   4427 		goto release;
   4428 
   4429 	/*
   4430 	 * Make sure HW does not configure LCD from PHY extended configuration
   4431 	 * before SW configuration
   4432 	 */
   4433 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4434 	if ((sc->sc_type < WM_T_PCH2)
   4435 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4436 		goto release;
   4437 
   4438 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4439 		device_xname(sc->sc_dev), __func__));
   4440 	/* word_addr is in DWORD */
   4441 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4442 
   4443 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4444 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4445 	if (cnf_size == 0)
   4446 		goto release;
   4447 
   4448 	if (((sc->sc_type == WM_T_PCH)
   4449 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4450 	    || (sc->sc_type > WM_T_PCH)) {
   4451 		/*
   4452 		 * HW configures the SMBus address and LEDs when the OEM and
   4453 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4454 		 * are cleared, SW will configure them instead.
   4455 		 */
   4456 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4457 			device_xname(sc->sc_dev), __func__));
   4458 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4459 			goto release;
   4460 
   4461 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4462 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4463 		    (uint16_t)reg);
   4464 		if (rv != 0)
   4465 			goto release;
   4466 	}
   4467 
   4468 	/* Configure LCD from extended configuration region. */
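         	/*
         	 * Layout sketch (an assumption drawn from the reads below, not
         	 * a datasheet quote): each DWORD of the region holds a 16-bit
         	 * data word followed by a 16-bit address word, which is why
         	 * word_addr converts the DWORD pointer with "<< 1" above and
         	 * the loop reads word pairs at word_addr + i * 2.
         	 */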
   4469 	for (i = 0; i < cnf_size; i++) {
   4470 		uint16_t reg_data, reg_addr;
   4471 
   4472 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4473 			goto release;
   4474 
    4475 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1,
         		    &reg_addr) != 0)
   4476 			goto release;
   4477 
   4478 		if (reg_addr == IGPHY_PAGE_SELECT)
   4479 			phy_page = reg_data;
   4480 
   4481 		reg_addr &= IGPHY_MAXREGADDR;
   4482 		reg_addr |= phy_page;
   4483 
   4484 		KASSERT(sc->phy.writereg_locked != NULL);
   4485 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4486 		    reg_data);
   4487 	}
   4488 
   4489 release:
   4490 	sc->phy.release(sc);
   4491 	return rv;
   4492 }
   4493 
   4494 /*
   4495  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4496  *  @sc:       pointer to the HW structure
   4497  *  @d0_state: boolean if entering d0 or d3 device state
   4498  *
   4499  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4500  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4501  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4502  */
   4503 int
   4504 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4505 {
   4506 	uint32_t mac_reg;
   4507 	uint16_t oem_reg;
   4508 	int rv;
   4509 
   4510 	if (sc->sc_type < WM_T_PCH)
   4511 		return 0;
   4512 
   4513 	rv = sc->phy.acquire(sc);
   4514 	if (rv != 0)
   4515 		return rv;
   4516 
   4517 	if (sc->sc_type == WM_T_PCH) {
   4518 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4519 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4520 			goto release;
   4521 	}
   4522 
   4523 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4524 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4525 		goto release;
   4526 
   4527 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4528 
   4529 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4530 	if (rv != 0)
   4531 		goto release;
   4532 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4533 
   4534 	if (d0_state) {
   4535 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4536 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4537 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4538 			oem_reg |= HV_OEM_BITS_LPLU;
   4539 	} else {
   4540 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4541 		    != 0)
   4542 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4543 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4544 		    != 0)
   4545 			oem_reg |= HV_OEM_BITS_LPLU;
   4546 	}
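         	/*
         	 * In short (a summary of the checks above, not a datasheet
         	 * quote): for D0 only PHY_CTRL_GBE_DIS and the D0a LPLU bit
         	 * matter, while for other power states the NOND0A variants
         	 * are honored as well.
         	 */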
   4547 
   4548 	/* Set Restart auto-neg to activate the bits */
   4549 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4550 	    && (wm_phy_resetisblocked(sc) == false))
   4551 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4552 
   4553 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4554 
   4555 release:
   4556 	sc->phy.release(sc);
   4557 
   4558 	return rv;
   4559 }
   4560 
   4561 /* Init hardware bits */
   4562 void
   4563 wm_initialize_hardware_bits(struct wm_softc *sc)
   4564 {
   4565 	uint32_t tarc0, tarc1, reg;
   4566 
   4567 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4568 		device_xname(sc->sc_dev), __func__));
   4569 
   4570 	/* For 82571 variant, 80003 and ICHs */
   4571 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4572 	    || (sc->sc_type >= WM_T_80003)) {
   4573 
   4574 		/* Transmit Descriptor Control 0 */
   4575 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4576 		reg |= TXDCTL_COUNT_DESC;
   4577 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4578 
   4579 		/* Transmit Descriptor Control 1 */
   4580 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4581 		reg |= TXDCTL_COUNT_DESC;
   4582 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4583 
   4584 		/* TARC0 */
   4585 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4586 		switch (sc->sc_type) {
   4587 		case WM_T_82571:
   4588 		case WM_T_82572:
   4589 		case WM_T_82573:
   4590 		case WM_T_82574:
   4591 		case WM_T_82583:
   4592 		case WM_T_80003:
   4593 			/* Clear bits 30..27 */
   4594 			tarc0 &= ~__BITS(30, 27);
   4595 			break;
   4596 		default:
   4597 			break;
   4598 		}
   4599 
   4600 		switch (sc->sc_type) {
   4601 		case WM_T_82571:
   4602 		case WM_T_82572:
   4603 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4604 
   4605 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4606 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4607 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4608 			/* 8257[12] Errata No.7 */
    4609 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4610 
   4611 			/* TARC1 bit 28 */
   4612 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4613 				tarc1 &= ~__BIT(28);
   4614 			else
   4615 				tarc1 |= __BIT(28);
   4616 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4617 
   4618 			/*
   4619 			 * 8257[12] Errata No.13
    4620 			 * Disable Dynamic Clock Gating.
   4621 			 */
   4622 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4623 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4624 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4625 			break;
   4626 		case WM_T_82573:
   4627 		case WM_T_82574:
   4628 		case WM_T_82583:
   4629 			if ((sc->sc_type == WM_T_82574)
   4630 			    || (sc->sc_type == WM_T_82583))
   4631 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4632 
   4633 			/* Extended Device Control */
   4634 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4635 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4636 			reg |= __BIT(22);	/* Set bit 22 */
   4637 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4638 
   4639 			/* Device Control */
   4640 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4641 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4642 
   4643 			/* PCIe Control Register */
   4644 			/*
   4645 			 * 82573 Errata (unknown).
   4646 			 *
   4647 			 * 82574 Errata 25 and 82583 Errata 12
   4648 			 * "Dropped Rx Packets":
    4649 			 *   Fixed in NVM Image Version 2.1.4 and newer.
   4650 			 */
   4651 			reg = CSR_READ(sc, WMREG_GCR);
   4652 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4653 			CSR_WRITE(sc, WMREG_GCR, reg);
   4654 
   4655 			if ((sc->sc_type == WM_T_82574)
   4656 			    || (sc->sc_type == WM_T_82583)) {
   4657 				/*
   4658 				 * Document says this bit must be set for
   4659 				 * proper operation.
   4660 				 */
   4661 				reg = CSR_READ(sc, WMREG_GCR);
   4662 				reg |= __BIT(22);
   4663 				CSR_WRITE(sc, WMREG_GCR, reg);
   4664 
    4665 				/*
    4666 				 * Apply a workaround for the hardware
    4667 				 * erratum documented in the errata docs. It
    4668 				 * fixes an issue where error-prone or
    4669 				 * unreliable PCIe completions occur,
    4670 				 * particularly with ASPM enabled. Without
    4671 				 * the fix, the issue can cause Tx timeouts.
    4672 				 */
   4673 				reg = CSR_READ(sc, WMREG_GCR2);
   4674 				reg |= __BIT(0);
   4675 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4676 			}
   4677 			break;
   4678 		case WM_T_80003:
   4679 			/* TARC0 */
   4680 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4681 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4682 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4683 
   4684 			/* TARC1 bit 28 */
   4685 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4686 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4687 				tarc1 &= ~__BIT(28);
   4688 			else
   4689 				tarc1 |= __BIT(28);
   4690 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4691 			break;
   4692 		case WM_T_ICH8:
   4693 		case WM_T_ICH9:
   4694 		case WM_T_ICH10:
   4695 		case WM_T_PCH:
   4696 		case WM_T_PCH2:
   4697 		case WM_T_PCH_LPT:
   4698 		case WM_T_PCH_SPT:
   4699 		case WM_T_PCH_CNP:
   4700 			/* TARC0 */
   4701 			if (sc->sc_type == WM_T_ICH8) {
   4702 				/* Set TARC0 bits 29 and 28 */
   4703 				tarc0 |= __BITS(29, 28);
   4704 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4705 				tarc0 |= __BIT(29);
   4706 				/*
    4707 				 * Drop bit 28. From Linux.
    4708 				 * See the I218/I219 spec update,
    4709 				 * "5. Buffer Overrun While the I219 is
    4710 				 * Processing DMA Transactions".
   4711 				 */
   4712 				tarc0 &= ~__BIT(28);
   4713 			}
   4714 			/* Set TARC0 bits 23,24,26,27 */
   4715 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4716 
   4717 			/* CTRL_EXT */
   4718 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4719 			reg |= __BIT(22);	/* Set bit 22 */
   4720 			/*
   4721 			 * Enable PHY low-power state when MAC is at D3
   4722 			 * w/o WoL
   4723 			 */
   4724 			if (sc->sc_type >= WM_T_PCH)
   4725 				reg |= CTRL_EXT_PHYPDEN;
   4726 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4727 
   4728 			/* TARC1 */
   4729 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4730 			/* bit 28 */
   4731 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4732 				tarc1 &= ~__BIT(28);
   4733 			else
   4734 				tarc1 |= __BIT(28);
   4735 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4736 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4737 
   4738 			/* Device Status */
   4739 			if (sc->sc_type == WM_T_ICH8) {
   4740 				reg = CSR_READ(sc, WMREG_STATUS);
   4741 				reg &= ~__BIT(31);
   4742 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4743 
   4744 			}
   4745 
   4746 			/* IOSFPC */
   4747 			if (sc->sc_type == WM_T_PCH_SPT) {
   4748 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4749 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4750 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4751 			}
   4752 			/*
    4753 			 * To work around a descriptor data corruption issue
    4754 			 * during NFS v2 UDP traffic, just disable the NFS
    4755 			 * filtering capability.
   4756 			 */
   4757 			reg = CSR_READ(sc, WMREG_RFCTL);
   4758 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4759 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4760 			break;
   4761 		default:
   4762 			break;
   4763 		}
   4764 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4765 
   4766 		switch (sc->sc_type) {
   4767 		/*
   4768 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4769 		 * Avoid RSS Hash Value bug.
   4770 		 */
   4771 		case WM_T_82571:
   4772 		case WM_T_82572:
   4773 		case WM_T_82573:
   4774 		case WM_T_80003:
   4775 		case WM_T_ICH8:
   4776 			reg = CSR_READ(sc, WMREG_RFCTL);
    4777 			reg |= WMREG_RFCTL_NEWIPV6EXDIS
         			    | WMREG_RFCTL_IPV6EXDIS;
   4778 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4779 			break;
   4780 		case WM_T_82574:
    4781 			/* Use extended Rx descriptors. */
   4782 			reg = CSR_READ(sc, WMREG_RFCTL);
   4783 			reg |= WMREG_RFCTL_EXSTEN;
   4784 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4785 			break;
   4786 		default:
   4787 			break;
   4788 		}
   4789 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4790 		/*
   4791 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4792 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4793 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4794 		 * Correctly by the Device"
   4795 		 *
   4796 		 * I354(C2000) Errata AVR53:
   4797 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4798 		 * Hang"
   4799 		 */
   4800 		reg = CSR_READ(sc, WMREG_RFCTL);
   4801 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4802 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4803 	}
   4804 }
   4805 
   4806 static uint32_t
   4807 wm_rxpbs_adjust_82580(uint32_t val)
   4808 {
   4809 	uint32_t rv = 0;
   4810 
   4811 	if (val < __arraycount(wm_82580_rxpbs_table))
   4812 		rv = wm_82580_rxpbs_table[val];
   4813 
   4814 	return rv;
   4815 }
   4816 
   4817 /*
   4818  * wm_reset_phy:
   4819  *
   4820  *	generic PHY reset function.
   4821  *	Same as e1000_phy_hw_reset_generic()
   4822  */
   4823 static int
   4824 wm_reset_phy(struct wm_softc *sc)
   4825 {
   4826 	uint32_t reg;
   4827 
   4828 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4829 		device_xname(sc->sc_dev), __func__));
   4830 	if (wm_phy_resetisblocked(sc))
   4831 		return -1;
   4832 
   4833 	sc->phy.acquire(sc);
   4834 
   4835 	reg = CSR_READ(sc, WMREG_CTRL);
   4836 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4837 	CSR_WRITE_FLUSH(sc);
   4838 
   4839 	delay(sc->phy.reset_delay_us);
   4840 
   4841 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4842 	CSR_WRITE_FLUSH(sc);
   4843 
   4844 	delay(150);
   4845 
   4846 	sc->phy.release(sc);
   4847 
   4848 	wm_get_cfg_done(sc);
   4849 	wm_phy_post_reset(sc);
   4850 
   4851 	return 0;
   4852 }
   4853 
   4854 /*
   4855  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   4856  *
    4857  * On the I219, the descriptor rings must be emptied before resetting the HW
   4858  * or before changing the device state to D3 during runtime (runtime PM).
   4859  *
   4860  * Failure to do this will cause the HW to enter a unit hang state which can
   4861  * only be released by PCI reset on the device.
   4862  *
   4863  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   4864  */
   4865 static void
   4866 wm_flush_desc_rings(struct wm_softc *sc)
   4867 {
   4868 	pcireg_t preg;
   4869 	uint32_t reg;
   4870 	struct wm_txqueue *txq;
   4871 	wiseman_txdesc_t *txd;
   4872 	int nexttx;
   4873 	uint32_t rctl;
   4874 
   4875 	/* First, disable MULR fix in FEXTNVM11 */
   4876 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4877 	reg |= FEXTNVM11_DIS_MULRFIX;
   4878 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4879 
   4880 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4881 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4882 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4883 		return;
   4884 
   4885 	/*
   4886 	 * Remove all descriptors from the tx_ring.
   4887 	 *
    4888 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    4889 	 * happens when the HW reads the regs. We assign the ring itself as
    4890 	 * the data of the next descriptor. We don't care about the data as we
    4891 	 * are about to reset the HW.
   4892 	 */
   4893 #ifdef WM_DEBUG
   4894 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   4895 #endif
   4896 	reg = CSR_READ(sc, WMREG_TCTL);
   4897 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4898 
   4899 	txq = &sc->sc_queue[0].wmq_txq;
   4900 	nexttx = txq->txq_next;
   4901 	txd = &txq->txq_descs[nexttx];
   4902 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   4903 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4904 	txd->wtx_fields.wtxu_status = 0;
   4905 	txd->wtx_fields.wtxu_options = 0;
   4906 	txd->wtx_fields.wtxu_vlan = 0;
   4907 
   4908 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4909 	    BUS_SPACE_BARRIER_WRITE);
   4910 
   4911 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4912 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4913 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4914 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4915 	delay(250);
   4916 
   4917 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4918 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4919 		return;
   4920 
   4921 	/*
   4922 	 * Mark all descriptors in the RX ring as consumed and disable the
   4923 	 * rx ring.
   4924 	 */
   4925 #ifdef WM_DEBUG
   4926 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4927 #endif
   4928 	rctl = CSR_READ(sc, WMREG_RCTL);
   4929 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4930 	CSR_WRITE_FLUSH(sc);
   4931 	delay(150);
   4932 
   4933 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4934 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4935 	reg &= 0xffffc000;
   4936 	/*
   4937 	 * Update thresholds: prefetch threshold to 31, host threshold
   4938 	 * to 1 and make sure the granularity is "descriptors" and not
   4939 	 * "cache lines"
   4940 	 */
   4941 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4942 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4943 
   4944 	/* Momentarily enable the RX ring for the changes to take effect */
   4945 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4946 	CSR_WRITE_FLUSH(sc);
   4947 	delay(150);
   4948 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4949 }
   4950 
   4951 /*
   4952  * wm_reset:
   4953  *
   4954  *	Reset the i82542 chip.
   4955  */
   4956 static void
   4957 wm_reset(struct wm_softc *sc)
   4958 {
   4959 	int phy_reset = 0;
   4960 	int i, error = 0;
   4961 	uint32_t reg;
   4962 	uint16_t kmreg;
   4963 	int rv;
   4964 
   4965 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4966 		device_xname(sc->sc_dev), __func__));
   4967 	KASSERT(sc->sc_type != 0);
   4968 
   4969 	/*
   4970 	 * Allocate on-chip memory according to the MTU size.
   4971 	 * The Packet Buffer Allocation register must be written
   4972 	 * before the chip is reset.
   4973 	 */
   4974 	switch (sc->sc_type) {
   4975 	case WM_T_82547:
   4976 	case WM_T_82547_2:
   4977 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4978 		    PBA_22K : PBA_30K;
   4979 		for (i = 0; i < sc->sc_nqueues; i++) {
   4980 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4981 			txq->txq_fifo_head = 0;
   4982 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4983 			txq->txq_fifo_size =
   4984 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4985 			txq->txq_fifo_stall = 0;
   4986 		}
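         		/*
         		 * Worked example (assuming the PBA_* constants are in
         		 * KB units, as PBA_BYTE_SHIFT suggests): with an MTU
         		 * above 8192, sc_pba = PBA_22K, leaving 40KB - 22KB =
         		 * 18KB of packet buffer for the TX FIFO.
         		 */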
   4987 		break;
   4988 	case WM_T_82571:
   4989 	case WM_T_82572:
    4990 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4991 	case WM_T_80003:
   4992 		sc->sc_pba = PBA_32K;
   4993 		break;
   4994 	case WM_T_82573:
   4995 		sc->sc_pba = PBA_12K;
   4996 		break;
   4997 	case WM_T_82574:
   4998 	case WM_T_82583:
   4999 		sc->sc_pba = PBA_20K;
   5000 		break;
   5001 	case WM_T_82576:
   5002 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5003 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5004 		break;
   5005 	case WM_T_82580:
   5006 	case WM_T_I350:
   5007 	case WM_T_I354:
   5008 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5009 		break;
   5010 	case WM_T_I210:
   5011 	case WM_T_I211:
   5012 		sc->sc_pba = PBA_34K;
   5013 		break;
   5014 	case WM_T_ICH8:
   5015 		/* Workaround for a bit corruption issue in FIFO memory */
   5016 		sc->sc_pba = PBA_8K;
   5017 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5018 		break;
   5019 	case WM_T_ICH9:
   5020 	case WM_T_ICH10:
   5021 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5022 		    PBA_14K : PBA_10K;
   5023 		break;
   5024 	case WM_T_PCH:
   5025 	case WM_T_PCH2:	/* XXX 14K? */
   5026 	case WM_T_PCH_LPT:
   5027 	case WM_T_PCH_SPT:
   5028 	case WM_T_PCH_CNP:
   5029 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5030 		    PBA_12K : PBA_26K;
   5031 		break;
   5032 	default:
   5033 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5034 		    PBA_40K : PBA_48K;
   5035 		break;
   5036 	}
   5037 	/*
   5038 	 * Only old or non-multiqueue devices have the PBA register
   5039 	 * XXX Need special handling for 82575.
   5040 	 */
   5041 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5042 	    || (sc->sc_type == WM_T_82575))
   5043 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5044 
   5045 	/* Prevent the PCI-E bus from sticking */
   5046 	if (sc->sc_flags & WM_F_PCIE) {
   5047 		int timeout = 800;
   5048 
   5049 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5050 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5051 
   5052 		while (timeout--) {
   5053 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5054 			    == 0)
   5055 				break;
   5056 			delay(100);
   5057 		}
   5058 		if (timeout == 0)
   5059 			device_printf(sc->sc_dev,
   5060 			    "failed to disable busmastering\n");
   5061 	}
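         	/*
         	 * Note: the loop above polls in 100us steps with an 800
         	 * iteration budget, i.e. it waits up to about 80ms for pending
         	 * PCIe master requests to drain before complaining.
         	 */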
   5062 
   5063 	/* Set the completion timeout for interface */
   5064 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5065 	    || (sc->sc_type == WM_T_82580)
   5066 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5067 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5068 		wm_set_pcie_completion_timeout(sc);
   5069 
   5070 	/* Clear interrupt */
   5071 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5072 	if (wm_is_using_msix(sc)) {
   5073 		if (sc->sc_type != WM_T_82574) {
   5074 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5075 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5076 		} else
   5077 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5078 	}
   5079 
   5080 	/* Stop the transmit and receive processes. */
   5081 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5082 	sc->sc_rctl &= ~RCTL_EN;
   5083 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5084 	CSR_WRITE_FLUSH(sc);
   5085 
   5086 	/* XXX set_tbi_sbp_82543() */
   5087 
   5088 	delay(10*1000);
   5089 
   5090 	/* Must acquire the MDIO ownership before MAC reset */
   5091 	switch (sc->sc_type) {
   5092 	case WM_T_82573:
   5093 	case WM_T_82574:
   5094 	case WM_T_82583:
   5095 		error = wm_get_hw_semaphore_82573(sc);
   5096 		break;
   5097 	default:
   5098 		break;
   5099 	}
   5100 
   5101 	/*
   5102 	 * 82541 Errata 29? & 82547 Errata 28?
   5103 	 * See also the description about PHY_RST bit in CTRL register
   5104 	 * in 8254x_GBe_SDM.pdf.
   5105 	 */
   5106 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5107 		CSR_WRITE(sc, WMREG_CTRL,
   5108 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5109 		CSR_WRITE_FLUSH(sc);
   5110 		delay(5000);
   5111 	}
   5112 
   5113 	switch (sc->sc_type) {
   5114 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5115 	case WM_T_82541:
   5116 	case WM_T_82541_2:
   5117 	case WM_T_82547:
   5118 	case WM_T_82547_2:
   5119 		/*
   5120 		 * On some chipsets, a reset through a memory-mapped write
   5121 		 * cycle can cause the chip to reset before completing the
    5122 		 * write cycle. This causes major headaches that can be avoided
   5123 		 * by issuing the reset via indirect register writes through
   5124 		 * I/O space.
   5125 		 *
   5126 		 * So, if we successfully mapped the I/O BAR at attach time,
   5127 		 * use that. Otherwise, try our luck with a memory-mapped
   5128 		 * reset.
   5129 		 */
   5130 		if (sc->sc_flags & WM_F_IOH_VALID)
   5131 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5132 		else
   5133 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5134 		break;
   5135 	case WM_T_82545_3:
   5136 	case WM_T_82546_3:
   5137 		/* Use the shadow control register on these chips. */
   5138 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5139 		break;
   5140 	case WM_T_80003:
   5141 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5142 		sc->phy.acquire(sc);
   5143 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5144 		sc->phy.release(sc);
   5145 		break;
   5146 	case WM_T_ICH8:
   5147 	case WM_T_ICH9:
   5148 	case WM_T_ICH10:
   5149 	case WM_T_PCH:
   5150 	case WM_T_PCH2:
   5151 	case WM_T_PCH_LPT:
   5152 	case WM_T_PCH_SPT:
   5153 	case WM_T_PCH_CNP:
   5154 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5155 		if (wm_phy_resetisblocked(sc) == false) {
   5156 			/*
   5157 			 * Gate automatic PHY configuration by hardware on
   5158 			 * non-managed 82579
   5159 			 */
   5160 			if ((sc->sc_type == WM_T_PCH2)
   5161 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5162 				== 0))
   5163 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5164 
   5165 			reg |= CTRL_PHY_RESET;
   5166 			phy_reset = 1;
   5167 		} else
   5168 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5169 		sc->phy.acquire(sc);
   5170 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5171 		/* Don't insert a completion barrier when reset */
   5172 		delay(20*1000);
   5173 		mutex_exit(sc->sc_ich_phymtx);
   5174 		break;
   5175 	case WM_T_82580:
   5176 	case WM_T_I350:
   5177 	case WM_T_I354:
   5178 	case WM_T_I210:
   5179 	case WM_T_I211:
   5180 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5181 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5182 			CSR_WRITE_FLUSH(sc);
   5183 		delay(5000);
   5184 		break;
   5185 	case WM_T_82542_2_0:
   5186 	case WM_T_82542_2_1:
   5187 	case WM_T_82543:
   5188 	case WM_T_82540:
   5189 	case WM_T_82545:
   5190 	case WM_T_82546:
   5191 	case WM_T_82571:
   5192 	case WM_T_82572:
   5193 	case WM_T_82573:
   5194 	case WM_T_82574:
   5195 	case WM_T_82575:
   5196 	case WM_T_82576:
   5197 	case WM_T_82583:
   5198 	default:
   5199 		/* Everything else can safely use the documented method. */
   5200 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5201 		break;
   5202 	}
   5203 
   5204 	/* Must release the MDIO ownership after MAC reset */
   5205 	switch (sc->sc_type) {
   5206 	case WM_T_82573:
   5207 	case WM_T_82574:
   5208 	case WM_T_82583:
   5209 		if (error == 0)
   5210 			wm_put_hw_semaphore_82573(sc);
   5211 		break;
   5212 	default:
   5213 		break;
   5214 	}
   5215 
   5216 	/* Set Phy Config Counter to 50msec */
   5217 	if (sc->sc_type == WM_T_PCH2) {
   5218 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5219 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5220 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5221 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5222 	}
   5223 
   5224 	if (phy_reset != 0)
   5225 		wm_get_cfg_done(sc);
   5226 
   5227 	/* Reload EEPROM */
   5228 	switch (sc->sc_type) {
   5229 	case WM_T_82542_2_0:
   5230 	case WM_T_82542_2_1:
   5231 	case WM_T_82543:
   5232 	case WM_T_82544:
   5233 		delay(10);
   5234 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5235 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5236 		CSR_WRITE_FLUSH(sc);
   5237 		delay(2000);
   5238 		break;
   5239 	case WM_T_82540:
   5240 	case WM_T_82545:
   5241 	case WM_T_82545_3:
   5242 	case WM_T_82546:
   5243 	case WM_T_82546_3:
   5244 		delay(5*1000);
   5245 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5246 		break;
   5247 	case WM_T_82541:
   5248 	case WM_T_82541_2:
   5249 	case WM_T_82547:
   5250 	case WM_T_82547_2:
   5251 		delay(20000);
   5252 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5253 		break;
   5254 	case WM_T_82571:
   5255 	case WM_T_82572:
   5256 	case WM_T_82573:
   5257 	case WM_T_82574:
   5258 	case WM_T_82583:
   5259 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5260 			delay(10);
   5261 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5262 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5263 			CSR_WRITE_FLUSH(sc);
   5264 		}
   5265 		/* check EECD_EE_AUTORD */
   5266 		wm_get_auto_rd_done(sc);
   5267 		/*
   5268 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5269 		 * is set.
   5270 		 */
   5271 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5272 		    || (sc->sc_type == WM_T_82583))
   5273 			delay(25*1000);
   5274 		break;
   5275 	case WM_T_82575:
   5276 	case WM_T_82576:
   5277 	case WM_T_82580:
   5278 	case WM_T_I350:
   5279 	case WM_T_I354:
   5280 	case WM_T_I210:
   5281 	case WM_T_I211:
   5282 	case WM_T_80003:
   5283 		/* check EECD_EE_AUTORD */
   5284 		wm_get_auto_rd_done(sc);
   5285 		break;
   5286 	case WM_T_ICH8:
   5287 	case WM_T_ICH9:
   5288 	case WM_T_ICH10:
   5289 	case WM_T_PCH:
   5290 	case WM_T_PCH2:
   5291 	case WM_T_PCH_LPT:
   5292 	case WM_T_PCH_SPT:
   5293 	case WM_T_PCH_CNP:
   5294 		break;
   5295 	default:
   5296 		panic("%s: unknown type\n", __func__);
   5297 	}
   5298 
   5299 	/* Check whether EEPROM is present or not */
   5300 	switch (sc->sc_type) {
   5301 	case WM_T_82575:
   5302 	case WM_T_82576:
   5303 	case WM_T_82580:
   5304 	case WM_T_I350:
   5305 	case WM_T_I354:
   5306 	case WM_T_ICH8:
   5307 	case WM_T_ICH9:
   5308 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5309 			/* Not found */
   5310 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5311 			if (sc->sc_type == WM_T_82575)
   5312 				wm_reset_init_script_82575(sc);
   5313 		}
   5314 		break;
   5315 	default:
   5316 		break;
   5317 	}
   5318 
   5319 	if (phy_reset != 0)
   5320 		wm_phy_post_reset(sc);
   5321 
   5322 	if ((sc->sc_type == WM_T_82580)
   5323 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5324 		/* Clear global device reset status bit */
   5325 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5326 	}
   5327 
   5328 	/* Clear any pending interrupt events. */
   5329 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5330 	reg = CSR_READ(sc, WMREG_ICR);
   5331 	if (wm_is_using_msix(sc)) {
   5332 		if (sc->sc_type != WM_T_82574) {
   5333 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5334 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5335 		} else
   5336 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5337 	}
   5338 
   5339 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5340 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5341 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5342 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5343 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5344 		reg |= KABGTXD_BGSQLBIAS;
   5345 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5346 	}
   5347 
   5348 	/* Reload sc_ctrl */
   5349 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5350 
   5351 	wm_set_eee(sc);
   5352 
   5353 	/*
   5354 	 * For PCH, this write will make sure that any noise will be detected
   5355 	 * as a CRC error and be dropped rather than show up as a bad packet
    5356 	 * to the DMA engine.
   5357 	 */
   5358 	if (sc->sc_type == WM_T_PCH)
   5359 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5360 
   5361 	if (sc->sc_type >= WM_T_82544)
   5362 		CSR_WRITE(sc, WMREG_WUC, 0);
   5363 
   5364 	if (sc->sc_type < WM_T_82575)
   5365 		wm_disable_aspm(sc); /* Workaround for some chips */
   5366 
   5367 	wm_reset_mdicnfg_82580(sc);
   5368 
   5369 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5370 		wm_pll_workaround_i210(sc);
   5371 
   5372 	if (sc->sc_type == WM_T_80003) {
   5373 		/* Default to TRUE to enable the MDIC W/A */
   5374 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5375 
   5376 		rv = wm_kmrn_readreg(sc,
   5377 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5378 		if (rv == 0) {
   5379 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5380 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5381 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5382 			else
   5383 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5384 		}
   5385 	}
   5386 }
   5387 
   5388 /*
   5389  * wm_add_rxbuf:
   5390  *
    5391  *	Add a receive buffer to the indicated descriptor.
   5392  */
   5393 static int
   5394 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5395 {
   5396 	struct wm_softc *sc = rxq->rxq_sc;
   5397 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5398 	struct mbuf *m;
   5399 	int error;
   5400 
   5401 	KASSERT(mutex_owned(rxq->rxq_lock));
   5402 
   5403 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5404 	if (m == NULL)
   5405 		return ENOBUFS;
   5406 
   5407 	MCLGET(m, M_DONTWAIT);
   5408 	if ((m->m_flags & M_EXT) == 0) {
   5409 		m_freem(m);
   5410 		return ENOBUFS;
   5411 	}
   5412 
   5413 	if (rxs->rxs_mbuf != NULL)
   5414 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5415 
   5416 	rxs->rxs_mbuf = m;
   5417 
   5418 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5419 	/*
   5420 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5421 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5422 	 */
   5423 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5424 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5425 	if (error) {
   5426 		/* XXX XXX XXX */
   5427 		aprint_error_dev(sc->sc_dev,
   5428 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5429 		panic("wm_add_rxbuf");
   5430 	}
   5431 
   5432 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5433 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5434 
   5435 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5436 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5437 			wm_init_rxdesc(rxq, idx);
   5438 	} else
   5439 		wm_init_rxdesc(rxq, idx);
   5440 
   5441 	return 0;
   5442 }
   5443 
   5444 /*
   5445  * wm_rxdrain:
   5446  *
   5447  *	Drain the receive queue.
   5448  */
   5449 static void
   5450 wm_rxdrain(struct wm_rxqueue *rxq)
   5451 {
   5452 	struct wm_softc *sc = rxq->rxq_sc;
   5453 	struct wm_rxsoft *rxs;
   5454 	int i;
   5455 
   5456 	KASSERT(mutex_owned(rxq->rxq_lock));
   5457 
   5458 	for (i = 0; i < WM_NRXDESC; i++) {
   5459 		rxs = &rxq->rxq_soft[i];
   5460 		if (rxs->rxs_mbuf != NULL) {
   5461 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5462 			m_freem(rxs->rxs_mbuf);
   5463 			rxs->rxs_mbuf = NULL;
   5464 		}
   5465 	}
   5466 }
   5467 
   5468 /*
   5469  * Setup registers for RSS.
   5470  *
    5471  * XXX VMDq is not supported yet.
   5472  */
   5473 static void
   5474 wm_init_rss(struct wm_softc *sc)
   5475 {
   5476 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5477 	int i;
   5478 
   5479 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5480 
   5481 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5482 		unsigned int qid, reta_ent;
   5483 
   5484 		qid  = i % sc->sc_nqueues;
   5485 		switch (sc->sc_type) {
   5486 		case WM_T_82574:
   5487 			reta_ent = __SHIFTIN(qid,
   5488 			    RETA_ENT_QINDEX_MASK_82574);
   5489 			break;
   5490 		case WM_T_82575:
   5491 			reta_ent = __SHIFTIN(qid,
   5492 			    RETA_ENT_QINDEX1_MASK_82575);
   5493 			break;
   5494 		default:
   5495 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5496 			break;
   5497 		}
   5498 
   5499 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5500 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5501 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5502 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5503 	}
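         	/*
         	 * Worked example: with sc_nqueues = 4, the loop above fills
         	 * the redirection table with the repeating pattern 0, 1, 2, 3,
         	 * ... so RSS hash results spread evenly across the four RX
         	 * queues.
         	 */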
   5504 
   5505 	rss_getkey((uint8_t *)rss_key);
   5506 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5507 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5508 
   5509 	if (sc->sc_type == WM_T_82574)
   5510 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5511 	else
   5512 		mrqc = MRQC_ENABLE_RSS_MQ;
   5513 
   5514 	/*
    5515 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5516 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5517 	 */
   5518 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5519 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5520 #if 0
   5521 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5522 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5523 #endif
   5524 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5525 
   5526 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5527 }
   5528 
   5529 /*
    5530  * Adjust the TX and RX queue numbers which the system actually uses.
    5531  *
    5532  * The numbers are affected by the parameters below:
    5533  *     - The number of hardware queues
   5534  *     - The number of MSI-X vectors (= "nvectors" argument)
   5535  *     - ncpu
   5536  */
   5537 static void
   5538 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5539 {
   5540 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5541 
   5542 	if (nvectors < 2) {
   5543 		sc->sc_nqueues = 1;
   5544 		return;
   5545 	}
   5546 
   5547 	switch (sc->sc_type) {
   5548 	case WM_T_82572:
   5549 		hw_ntxqueues = 2;
   5550 		hw_nrxqueues = 2;
   5551 		break;
   5552 	case WM_T_82574:
   5553 		hw_ntxqueues = 2;
   5554 		hw_nrxqueues = 2;
   5555 		break;
   5556 	case WM_T_82575:
   5557 		hw_ntxqueues = 4;
   5558 		hw_nrxqueues = 4;
   5559 		break;
   5560 	case WM_T_82576:
   5561 		hw_ntxqueues = 16;
   5562 		hw_nrxqueues = 16;
   5563 		break;
   5564 	case WM_T_82580:
   5565 	case WM_T_I350:
   5566 	case WM_T_I354:
   5567 		hw_ntxqueues = 8;
   5568 		hw_nrxqueues = 8;
   5569 		break;
   5570 	case WM_T_I210:
   5571 		hw_ntxqueues = 4;
   5572 		hw_nrxqueues = 4;
   5573 		break;
   5574 	case WM_T_I211:
   5575 		hw_ntxqueues = 2;
   5576 		hw_nrxqueues = 2;
   5577 		break;
   5578 		/*
    5579 		 * As the Ethernet controllers below do not support MSI-X,
    5580 		 * this driver does not use multiqueue on them:
   5581 		 *     - WM_T_80003
   5582 		 *     - WM_T_ICH8
   5583 		 *     - WM_T_ICH9
   5584 		 *     - WM_T_ICH10
   5585 		 *     - WM_T_PCH
   5586 		 *     - WM_T_PCH2
   5587 		 *     - WM_T_PCH_LPT
   5588 		 */
   5589 	default:
   5590 		hw_ntxqueues = 1;
   5591 		hw_nrxqueues = 1;
   5592 		break;
   5593 	}
   5594 
   5595 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5596 
   5597 	/*
    5598 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5599 	 * the number of queues actually used.
   5600 	 */
   5601 	if (nvectors < hw_nqueues + 1)
   5602 		sc->sc_nqueues = nvectors - 1;
   5603 	else
   5604 		sc->sc_nqueues = hw_nqueues;
   5605 
   5606 	/*
    5607 	 * As more queues than CPUs cannot improve scaling, we limit
    5608 	 * the number of queues actually used.
   5609 	 */
   5610 	if (ncpu < sc->sc_nqueues)
   5611 		sc->sc_nqueues = ncpu;
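
         	/*
         	 * Example of the clamping above: an 82576 (16 hardware queue
         	 * pairs) attached with nvectors = 5 on a 4-CPU machine ends up
         	 * with sc_nqueues = min(16, 5 - 1, 4) = 4.
         	 */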
   5612 }
   5613 
   5614 static inline bool
   5615 wm_is_using_msix(struct wm_softc *sc)
   5616 {
   5617 
   5618 	return (sc->sc_nintrs > 1);
   5619 }
   5620 
   5621 static inline bool
   5622 wm_is_using_multiqueue(struct wm_softc *sc)
   5623 {
   5624 
   5625 	return (sc->sc_nqueues > 1);
   5626 }
   5627 
   5628 static int
   5629 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5630 {
   5631 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5632 
   5633 	wmq->wmq_id = qidx;
   5634 	wmq->wmq_intr_idx = intr_idx;
   5635 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5636 	    wm_handle_queue, wmq);
   5637 	if (wmq->wmq_si != NULL)
   5638 		return 0;
   5639 
   5640 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5641 	    wmq->wmq_id);
   5642 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5643 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5644 	return ENOMEM;
   5645 }
   5646 
   5647 /*
    5648  * Both single-interrupt MSI and INTx can use this function.
   5649  */
   5650 static int
   5651 wm_setup_legacy(struct wm_softc *sc)
   5652 {
   5653 	pci_chipset_tag_t pc = sc->sc_pc;
   5654 	const char *intrstr = NULL;
   5655 	char intrbuf[PCI_INTRSTR_LEN];
   5656 	int error;
   5657 
   5658 	error = wm_alloc_txrx_queues(sc);
   5659 	if (error) {
   5660 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5661 		    error);
   5662 		return ENOMEM;
   5663 	}
   5664 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5665 	    sizeof(intrbuf));
   5666 #ifdef WM_MPSAFE
   5667 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5668 #endif
   5669 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5670 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5671 	if (sc->sc_ihs[0] == NULL) {
   5672 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5673 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5674 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5675 		return ENOMEM;
   5676 	}
   5677 
   5678 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5679 	sc->sc_nintrs = 1;
   5680 
   5681 	return wm_softint_establish_queue(sc, 0, 0);
   5682 }
   5683 
   5684 static int
   5685 wm_setup_msix(struct wm_softc *sc)
   5686 {
   5687 	void *vih;
   5688 	kcpuset_t *affinity;
   5689 	int qidx, error, intr_idx, txrx_established;
   5690 	pci_chipset_tag_t pc = sc->sc_pc;
   5691 	const char *intrstr = NULL;
   5692 	char intrbuf[PCI_INTRSTR_LEN];
   5693 	char intr_xname[INTRDEVNAMEBUF];
   5694 
   5695 	if (sc->sc_nqueues < ncpu) {
   5696 		/*
   5697 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5698 		 * interrupts starts at CPU#1.
   5699 		 */
   5700 		sc->sc_affinity_offset = 1;
   5701 	} else {
   5702 		/*
    5703 		 * In this case, this device uses all CPUs. For readability,
    5704 		 * we match the affinity cpu_index to the MSI-X vector number.
   5705 		 */
   5706 		sc->sc_affinity_offset = 0;
   5707 	}
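         	/*
         	 * Example: with 4 queues on an 8-CPU machine,
         	 * sc_affinity_offset is 1, so the TX/RX vectors established
         	 * below land on CPU#1..CPU#4, leaving CPU#0 free for other
         	 * devices' interrupts.
         	 */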
   5708 
   5709 	error = wm_alloc_txrx_queues(sc);
   5710 	if (error) {
   5711 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5712 		    error);
   5713 		return ENOMEM;
   5714 	}
   5715 
   5716 	kcpuset_create(&affinity, false);
   5717 	intr_idx = 0;
   5718 
   5719 	/*
   5720 	 * TX and RX
   5721 	 */
   5722 	txrx_established = 0;
   5723 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5724 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5725 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5726 
   5727 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5728 		    sizeof(intrbuf));
   5729 #ifdef WM_MPSAFE
   5730 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5731 		    PCI_INTR_MPSAFE, true);
   5732 #endif
   5733 		memset(intr_xname, 0, sizeof(intr_xname));
   5734 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5735 		    device_xname(sc->sc_dev), qidx);
   5736 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5737 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5738 		if (vih == NULL) {
   5739 			aprint_error_dev(sc->sc_dev,
   5740 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5741 			    intrstr ? " at " : "",
   5742 			    intrstr ? intrstr : "");
   5743 
   5744 			goto fail;
   5745 		}
   5746 		kcpuset_zero(affinity);
   5747 		/* Round-robin affinity */
   5748 		kcpuset_set(affinity, affinity_to);
   5749 		error = interrupt_distribute(vih, affinity, NULL);
   5750 		if (error == 0) {
   5751 			aprint_normal_dev(sc->sc_dev,
   5752 			    "for TX and RX interrupting at %s affinity to %u\n",
   5753 			    intrstr, affinity_to);
   5754 		} else {
   5755 			aprint_normal_dev(sc->sc_dev,
   5756 			    "for TX and RX interrupting at %s\n", intrstr);
   5757 		}
   5758 		sc->sc_ihs[intr_idx] = vih;
   5759 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5760 			goto fail;
   5761 		txrx_established++;
   5762 		intr_idx++;
   5763 	}
   5764 
   5765 	/* LINK */
   5766 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5767 	    sizeof(intrbuf));
   5768 #ifdef WM_MPSAFE
   5769 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5770 #endif
   5771 	memset(intr_xname, 0, sizeof(intr_xname));
   5772 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5773 	    device_xname(sc->sc_dev));
   5774 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5775 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5776 	if (vih == NULL) {
   5777 		aprint_error_dev(sc->sc_dev,
   5778 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5779 		    intrstr ? " at " : "",
   5780 		    intrstr ? intrstr : "");
   5781 
   5782 		goto fail;
   5783 	}
   5784 	/* Keep default affinity to LINK interrupt */
   5785 	aprint_normal_dev(sc->sc_dev,
   5786 	    "for LINK interrupting at %s\n", intrstr);
   5787 	sc->sc_ihs[intr_idx] = vih;
   5788 	sc->sc_link_intr_idx = intr_idx;
   5789 
   5790 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5791 	kcpuset_destroy(affinity);
   5792 	return 0;
   5793 
   5794  fail:
   5795 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5796 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5797 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5798 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5799 	}
   5800 
   5801 	kcpuset_destroy(affinity);
   5802 	return ENOMEM;
   5803 }
   5804 
   5805 static void
   5806 wm_unset_stopping_flags(struct wm_softc *sc)
   5807 {
   5808 	int i;
   5809 
   5810 	KASSERT(WM_CORE_LOCKED(sc));
   5811 
   5812 	/* Must unset stopping flags in ascending order. */
   5813 	for (i = 0; i < sc->sc_nqueues; i++) {
   5814 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5815 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5816 
   5817 		mutex_enter(txq->txq_lock);
   5818 		txq->txq_stopping = false;
   5819 		mutex_exit(txq->txq_lock);
   5820 
   5821 		mutex_enter(rxq->rxq_lock);
   5822 		rxq->rxq_stopping = false;
   5823 		mutex_exit(rxq->rxq_lock);
   5824 	}
   5825 
   5826 	sc->sc_core_stopping = false;
   5827 }
   5828 
   5829 static void
   5830 wm_set_stopping_flags(struct wm_softc *sc)
   5831 {
   5832 	int i;
   5833 
   5834 	KASSERT(WM_CORE_LOCKED(sc));
   5835 
   5836 	sc->sc_core_stopping = true;
   5837 
   5838 	/* Must set stopping flags in ascending order. */
   5839 	for (i = 0; i < sc->sc_nqueues; i++) {
   5840 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5841 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5842 
   5843 		mutex_enter(rxq->rxq_lock);
   5844 		rxq->rxq_stopping = true;
   5845 		mutex_exit(rxq->rxq_lock);
   5846 
   5847 		mutex_enter(txq->txq_lock);
   5848 		txq->txq_stopping = true;
   5849 		mutex_exit(txq->txq_lock);
   5850 	}
   5851 }
   5852 
   5853 /*
   5854  * Write interrupt interval value to ITR or EITR
   5855  */
   5856 static void
   5857 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5858 {
   5859 
   5860 	if (!wmq->wmq_set_itr)
   5861 		return;
   5862 
   5863 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5864 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5865 
   5866 		/*
    5867 		 * The 82575 doesn't have the CNT_INGR field,
    5868 		 * so overwrite the counter field in software.
   5869 		 */
   5870 		if (sc->sc_type == WM_T_82575)
   5871 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5872 		else
   5873 			eitr |= EITR_CNT_INGR;
   5874 
   5875 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5876 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5877 		/*
    5878 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5879 		 * the multiqueue function with MSI-X.
   5880 		 */
   5881 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5882 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5883 	} else {
   5884 		KASSERT(wmq->wmq_id == 0);
   5885 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5886 	}
   5887 
   5888 	wmq->wmq_set_itr = false;
   5889 }
   5890 
   5891 /*
   5892  * TODO
    5893  * The dynamic ITR calculation below is almost the same as in Linux igb;
    5894  * however, it does not fit wm(4), so AIM stays disabled until we find an
    5895  * appropriate ITR calculation.
   5896  */
   5897 /*
    5898  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5899  * write to the register. This function does not write the ITR/EITR register.
   5900  */
   5901 static void
   5902 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5903 {
   5904 #ifdef NOTYET
   5905 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5906 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5907 	uint32_t avg_size = 0;
   5908 	uint32_t new_itr;
   5909 
   5910 	if (rxq->rxq_packets)
   5911 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5912 	if (txq->txq_packets)
   5913 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5914 
   5915 	if (avg_size == 0) {
   5916 		new_itr = 450; /* restore default value */
   5917 		goto out;
   5918 	}
   5919 
   5920 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5921 	avg_size += 24;
   5922 
   5923 	/* Don't starve jumbo frames */
   5924 	avg_size = uimin(avg_size, 3000);
   5925 
   5926 	/* Give a little boost to mid-size frames */
   5927 	if ((avg_size > 300) && (avg_size < 1200))
   5928 		new_itr = avg_size / 3;
   5929 	else
   5930 		new_itr = avg_size / 2;
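
         	/*
         	 * E.g. an average frame of 600 bytes becomes avg_size = 624
         	 * after the +24 overhead above, falls in the mid-size boost
         	 * range, and gives new_itr = 624 / 3 = 208 before the possible
         	 * "* 4" scaling below.
         	 */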
   5931 
   5932 out:
   5933 	/*
    5934 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5935 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5936 	 */
   5937 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5938 		new_itr *= 4;
   5939 
   5940 	if (new_itr != wmq->wmq_itr) {
   5941 		wmq->wmq_itr = new_itr;
   5942 		wmq->wmq_set_itr = true;
   5943 	} else
   5944 		wmq->wmq_set_itr = false;
   5945 
   5946 	rxq->rxq_packets = 0;
   5947 	rxq->rxq_bytes = 0;
   5948 	txq->txq_packets = 0;
   5949 	txq->txq_bytes = 0;
   5950 #endif
   5951 }
   5952 
   5953 static void
   5954 wm_init_sysctls(struct wm_softc *sc)
   5955 {
   5956 	struct sysctllog **log;
   5957 	const struct sysctlnode *rnode, *qnode, *cnode;
   5958 	int i, rv;
   5959 	const char *dvname;
   5960 
   5961 	log = &sc->sc_sysctllog;
   5962 	dvname = device_xname(sc->sc_dev);
   5963 
   5964 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5965 	    0, CTLTYPE_NODE, dvname,
   5966 	    SYSCTL_DESCR("wm information and settings"),
   5967 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5968 	if (rv != 0)
   5969 		goto err;
   5970 
   5971 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5972 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5973 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5974 	if (rv != 0)
   5975 		goto teardown;
   5976 
   5977 	for (i = 0; i < sc->sc_nqueues; i++) {
   5978 		struct wm_queue *wmq = &sc->sc_queue[i];
   5979 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5980 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5981 
   5982 		snprintf(sc->sc_queue[i].sysctlname,
   5983 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5984 
   5985 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5986 		    0, CTLTYPE_NODE,
   5987 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5988 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5989 			break;
   5990 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5991 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5992 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5993 		    NULL, 0, &txq->txq_free,
   5994 		    0, CTL_CREATE, CTL_EOL) != 0)
   5995 			break;
   5996 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5997 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5998 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5999 		    NULL, 0, &txq->txq_next,
   6000 		    0, CTL_CREATE, CTL_EOL) != 0)
   6001 			break;
   6002 
   6003 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6004 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6005 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6006 		    NULL, 0, &rxq->rxq_ptr,
   6007 		    0, CTL_CREATE, CTL_EOL) != 0)
   6008 			break;
   6009 	}
   6010 
   6011 #ifdef WM_DEBUG
   6012 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6013 	    CTLTYPE_INT, "debug_flags",
   6014 	    SYSCTL_DESCR(
   6015 		    "Debug flags:\n"	\
   6016 		    "\t0x01 LINK\n"	\
   6017 		    "\t0x02 TX\n"	\
   6018 		    "\t0x04 RX\n"	\
   6019 		    "\t0x08 GMII\n"	\
   6020 		    "\t0x10 MANAGE\n"	\
   6021 		    "\t0x20 NVM\n"	\
   6022 		    "\t0x40 INIT\n"	\
   6023 		    "\t0x80 LOCK"),
   6024 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6025 	if (rv != 0)
   6026 		goto teardown;
   6027 #endif
   6028 
   6029 	return;
   6030 
   6031 teardown:
   6032 	sysctl_teardown(log);
   6033 err:
   6034 	sc->sc_sysctllog = NULL;
   6035 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6036 	    __func__, rv);
   6037 }
   6038 
   6039 /*
   6040  * wm_init:		[ifnet interface function]
   6041  *
   6042  *	Initialize the interface.
   6043  */
   6044 static int
   6045 wm_init(struct ifnet *ifp)
   6046 {
   6047 	struct wm_softc *sc = ifp->if_softc;
   6048 	int ret;
   6049 
   6050 	WM_CORE_LOCK(sc);
   6051 	ret = wm_init_locked(ifp);
   6052 	WM_CORE_UNLOCK(sc);
   6053 
   6054 	return ret;
   6055 }
   6056 
   6057 static int
   6058 wm_init_locked(struct ifnet *ifp)
   6059 {
   6060 	struct wm_softc *sc = ifp->if_softc;
   6061 	struct ethercom *ec = &sc->sc_ethercom;
   6062 	int i, j, trynum, error = 0;
   6063 	uint32_t reg, sfp_mask = 0;
   6064 
   6065 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6066 		device_xname(sc->sc_dev), __func__));
   6067 	KASSERT(WM_CORE_LOCKED(sc));
   6068 
   6069 	/*
    6070 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6071 	 * There is a small but measurable benefit to avoiding the adjustment
   6072 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6073 	 * on such platforms.  One possibility is that the DMA itself is
   6074 	 * slightly more efficient if the front of the entire packet (instead
   6075 	 * of the front of the headers) is aligned.
   6076 	 *
   6077 	 * Note we must always set align_tweak to 0 if we are using
   6078 	 * jumbo frames.
   6079 	 */
   6080 #ifdef __NO_STRICT_ALIGNMENT
   6081 	sc->sc_align_tweak = 0;
   6082 #else
   6083 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6084 		sc->sc_align_tweak = 0;
   6085 	else
   6086 		sc->sc_align_tweak = 2;
   6087 #endif /* __NO_STRICT_ALIGNMENT */
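         	/*
         	 * Example: for the default 1500 byte MTU, 1500 + 14 + 4 = 1518
         	 * fits in MCLBYTES - 2 (2046 with the usual 2KB clusters), so
         	 * sc_align_tweak becomes 2; shifting the 14 byte Ethernet
         	 * header by two bytes makes the IP header start on a 4-byte
         	 * boundary.
         	 */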
   6088 
   6089 	/* Cancel any pending I/O. */
   6090 	wm_stop_locked(ifp, false, false);
   6091 
   6092 	/* Update statistics before reset */
   6093 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6094 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6095 
   6096 	/* PCH_SPT hardware workaround */
   6097 	if (sc->sc_type == WM_T_PCH_SPT)
   6098 		wm_flush_desc_rings(sc);
   6099 
   6100 	/* Reset the chip to a known state. */
   6101 	wm_reset(sc);
   6102 
   6103 	/*
    6104 	 * AMT based hardware can now take control from firmware.
   6105 	 * Do this after reset.
   6106 	 */
   6107 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6108 		wm_get_hw_control(sc);
   6109 
   6110 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6111 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6112 		wm_legacy_irq_quirk_spt(sc);
   6113 
   6114 	/* Init hardware bits */
   6115 	wm_initialize_hardware_bits(sc);
   6116 
   6117 	/* Reset the PHY. */
   6118 	if (sc->sc_flags & WM_F_HAS_MII)
   6119 		wm_gmii_reset(sc);
   6120 
   6121 	if (sc->sc_type >= WM_T_ICH8) {
   6122 		reg = CSR_READ(sc, WMREG_GCR);
   6123 		/*
   6124 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6125 		 * default after reset.
   6126 		 */
   6127 		if (sc->sc_type == WM_T_ICH8)
   6128 			reg |= GCR_NO_SNOOP_ALL;
   6129 		else
   6130 			reg &= ~GCR_NO_SNOOP_ALL;
   6131 		CSR_WRITE(sc, WMREG_GCR, reg);
   6132 	}
   6133 
   6134 	if ((sc->sc_type >= WM_T_ICH8)
   6135 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6136 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6137 
   6138 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6139 		reg |= CTRL_EXT_RO_DIS;
   6140 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6141 	}
   6142 
   6143 	/* Calculate (E)ITR value */
   6144 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6145 		/*
   6146 		 * For NEWQUEUE's EITR (except for 82575).
    6147 		 * 82575's EITR should use the same throttling value as
    6148 		 * other old controllers' ITR because the interrupt/sec
    6149 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6150 		 *
    6151 		 * 82574's EITR should use the same throttling value as ITR.
    6152 		 *
    6153 		 * For N interrupts/sec, set this value to:
    6154 		 * 1,000,000 / N in contrast to the ITR throttling value.
   6155 		 */
   6156 		sc->sc_itr_init = 450;
   6157 	} else if (sc->sc_type >= WM_T_82543) {
   6158 		/*
    6159 		 * Set up the interrupt throttling register (units of 256ns).
    6160 		 * Note that a footnote in Intel's documentation says this
    6161 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    6162 		 * or 10Mbit mode.  Empirically, it appears that this is
    6163 		 * also true for the 1024ns units of the other
    6164 		 * interrupt-related timer registers -- so, really, we ought
   6165 		 * to divide this value by 4 when the link speed is low.
   6166 		 *
   6167 		 * XXX implement this division at link speed change!
   6168 		 */
   6169 
   6170 		/*
   6171 		 * For N interrupts/sec, set this value to:
   6172 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6173 		 * absolute and packet timer values to this value
   6174 		 * divided by 4 to get "simple timer" behavior.
   6175 		 */
   6176 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6177 	}
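	/*
	 * Worked example of the two throttling formulas above (a sketch,
	 * not driver logic): sc_itr_init = 450 in EITR units gives roughly
	 * 1,000,000 / 450 ~= 2222 interrupts/sec, while sc_itr_init = 1500
	 * in 256ns ITR units gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec.
	 */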
   6178 
   6179 	error = wm_init_txrx_queues(sc);
   6180 	if (error)
   6181 		goto out;
   6182 
   6183 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6184 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6185 	    (sc->sc_type >= WM_T_82575))
   6186 		wm_serdes_power_up_link_82575(sc);
   6187 
   6188 	/* Clear out the VLAN table -- we don't use it (yet). */
   6189 	CSR_WRITE(sc, WMREG_VET, 0);
   6190 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6191 		trynum = 10; /* Due to hw errata */
   6192 	else
   6193 		trynum = 1;
   6194 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6195 		for (j = 0; j < trynum; j++)
   6196 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
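	/*
	 * For reference only (the driver does not use the VLAN filter
	 * yet, and "vid" below is a hypothetical VLAN id): VFTA is a bit
	 * array indexed by VLAN id, so admitting a given vid would look
	 * something like:
	 *
	 *	reg = CSR_READ(sc, WMREG_VFTA + ((vid >> 5) << 2));
	 *	reg |= 1U << (vid & 31);
	 *	CSR_WRITE(sc, WMREG_VFTA + ((vid >> 5) << 2), reg);
	 */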
   6197 
   6198 	/*
   6199 	 * Set up flow-control parameters.
   6200 	 *
   6201 	 * XXX Values could probably stand some tuning.
   6202 	 */
   6203 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6204 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6205 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6206 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6207 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6208 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6209 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6210 	}
   6211 
   6212 	sc->sc_fcrtl = FCRTL_DFLT;
   6213 	if (sc->sc_type < WM_T_82543) {
   6214 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6215 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6216 	} else {
   6217 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6218 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6219 	}
   6220 
   6221 	if (sc->sc_type == WM_T_80003)
   6222 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6223 	else
   6224 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6225 
   6226 	/* Writes the control register. */
   6227 	wm_set_vlan(sc);
   6228 
   6229 	if (sc->sc_flags & WM_F_HAS_MII) {
   6230 		uint16_t kmreg;
   6231 
   6232 		switch (sc->sc_type) {
   6233 		case WM_T_80003:
   6234 		case WM_T_ICH8:
   6235 		case WM_T_ICH9:
   6236 		case WM_T_ICH10:
   6237 		case WM_T_PCH:
   6238 		case WM_T_PCH2:
   6239 		case WM_T_PCH_LPT:
   6240 		case WM_T_PCH_SPT:
   6241 		case WM_T_PCH_CNP:
   6242 			/*
    6243 			 * Set the MAC to wait the maximum time between each
    6244 			 * iteration and increase the maximum number of
    6245 			 * iterations when polling the PHY; this fixes
    6246 			 * erroneous timeouts at 10Mbps.
   6247 			 */
   6248 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6249 			    0xFFFF);
   6250 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6251 			    &kmreg);
   6252 			kmreg |= 0x3F;
   6253 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6254 			    kmreg);
   6255 			break;
   6256 		default:
   6257 			break;
   6258 		}
   6259 
   6260 		if (sc->sc_type == WM_T_80003) {
   6261 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6262 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6263 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6264 
   6265 			/* Bypass RX and TX FIFO's */
   6266 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6267 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6268 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6269 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6270 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6271 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6272 		}
   6273 	}
   6274 #if 0
   6275 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6276 #endif
   6277 
   6278 	/* Set up checksum offload parameters. */
   6279 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6280 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6281 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6282 		reg |= RXCSUM_IPOFL;
   6283 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6284 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6285 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6286 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6287 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
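	/*
	 * For example, if only IFCAP_CSUM_IPv4_Rx and IFCAP_CSUM_TCPv4_Rx
	 * are enabled, the code above programs RXCSUM with
	 * RXCSUM_IPOFL | RXCSUM_TUOFL.
	 */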
   6288 
   6289 	/* Set registers about MSI-X */
   6290 	if (wm_is_using_msix(sc)) {
   6291 		uint32_t ivar, qintr_idx;
   6292 		struct wm_queue *wmq;
   6293 		unsigned int qid;
   6294 
   6295 		if (sc->sc_type == WM_T_82575) {
   6296 			/* Interrupt control */
   6297 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6298 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6299 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6300 
   6301 			/* TX and RX */
   6302 			for (i = 0; i < sc->sc_nqueues; i++) {
   6303 				wmq = &sc->sc_queue[i];
   6304 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6305 				    EITR_TX_QUEUE(wmq->wmq_id)
   6306 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6307 			}
   6308 			/* Link status */
   6309 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6310 			    EITR_OTHER);
   6311 		} else if (sc->sc_type == WM_T_82574) {
   6312 			/* Interrupt control */
   6313 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6314 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6315 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6316 
   6317 			/*
    6318 			 * Work around an issue with spurious interrupts
    6319 			 * in MSI-X mode.
    6320 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6321 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6322 			 */
   6323 			reg = CSR_READ(sc, WMREG_RFCTL);
   6324 			reg |= WMREG_RFCTL_ACKDIS;
   6325 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6326 
   6327 			ivar = 0;
   6328 			/* TX and RX */
   6329 			for (i = 0; i < sc->sc_nqueues; i++) {
   6330 				wmq = &sc->sc_queue[i];
   6331 				qid = wmq->wmq_id;
   6332 				qintr_idx = wmq->wmq_intr_idx;
   6333 
   6334 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6335 				    IVAR_TX_MASK_Q_82574(qid));
   6336 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6337 				    IVAR_RX_MASK_Q_82574(qid));
   6338 			}
   6339 			/* Link status */
   6340 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6341 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6342 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6343 		} else {
   6344 			/* Interrupt control */
   6345 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6346 			    | GPIE_EIAME | GPIE_PBA);
   6347 
   6348 			switch (sc->sc_type) {
   6349 			case WM_T_82580:
   6350 			case WM_T_I350:
   6351 			case WM_T_I354:
   6352 			case WM_T_I210:
   6353 			case WM_T_I211:
   6354 				/* TX and RX */
   6355 				for (i = 0; i < sc->sc_nqueues; i++) {
   6356 					wmq = &sc->sc_queue[i];
   6357 					qid = wmq->wmq_id;
   6358 					qintr_idx = wmq->wmq_intr_idx;
   6359 
   6360 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6361 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6362 					ivar |= __SHIFTIN((qintr_idx
   6363 						| IVAR_VALID),
   6364 					    IVAR_TX_MASK_Q(qid));
   6365 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6366 					ivar |= __SHIFTIN((qintr_idx
   6367 						| IVAR_VALID),
   6368 					    IVAR_RX_MASK_Q(qid));
   6369 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6370 				}
   6371 				break;
   6372 			case WM_T_82576:
   6373 				/* TX and RX */
   6374 				for (i = 0; i < sc->sc_nqueues; i++) {
   6375 					wmq = &sc->sc_queue[i];
   6376 					qid = wmq->wmq_id;
   6377 					qintr_idx = wmq->wmq_intr_idx;
   6378 
   6379 					ivar = CSR_READ(sc,
   6380 					    WMREG_IVAR_Q_82576(qid));
   6381 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6382 					ivar |= __SHIFTIN((qintr_idx
   6383 						| IVAR_VALID),
   6384 					    IVAR_TX_MASK_Q_82576(qid));
   6385 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6386 					ivar |= __SHIFTIN((qintr_idx
   6387 						| IVAR_VALID),
   6388 					    IVAR_RX_MASK_Q_82576(qid));
   6389 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6390 					    ivar);
   6391 				}
   6392 				break;
   6393 			default:
   6394 				break;
   6395 			}
   6396 
   6397 			/* Link status */
   6398 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6399 			    IVAR_MISC_OTHER);
   6400 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6401 		}
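		/*
		 * Illustrative example of the IVAR programming above (the
		 * numbers are hypothetical, not from the datasheets): with
		 * two queues on MSI-X vectors 0 and 1 and the link
		 * interrupt on vector 2, each queue's TX and RX IVAR
		 * entries are set to (vector | valid bit) and the
		 * misc/other IVAR entry to vector 2, so each event class
		 * is steered to its own vector.
		 */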
   6402 
   6403 		if (wm_is_using_multiqueue(sc)) {
   6404 			wm_init_rss(sc);
   6405 
    6406 			/*
    6407 			 * NOTE: Receive Full-Packet Checksum Offload
    6408 			 * is mutually exclusive with Multiqueue. However,
    6409 			 * this is not the same as the TCP/IP checksums,
    6410 			 * which still work.
    6411 			 */
   6412 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6413 			reg |= RXCSUM_PCSD;
   6414 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6415 		}
   6416 	}
   6417 
   6418 	/* Set up the interrupt registers. */
   6419 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6420 
   6421 	/* Enable SFP module insertion interrupt if it's required */
   6422 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6423 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6424 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6425 		sfp_mask = ICR_GPI(0);
   6426 	}
   6427 
   6428 	if (wm_is_using_msix(sc)) {
   6429 		uint32_t mask;
   6430 		struct wm_queue *wmq;
   6431 
   6432 		switch (sc->sc_type) {
   6433 		case WM_T_82574:
   6434 			mask = 0;
   6435 			for (i = 0; i < sc->sc_nqueues; i++) {
   6436 				wmq = &sc->sc_queue[i];
   6437 				mask |= ICR_TXQ(wmq->wmq_id);
   6438 				mask |= ICR_RXQ(wmq->wmq_id);
   6439 			}
   6440 			mask |= ICR_OTHER;
   6441 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6442 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6443 			break;
   6444 		default:
   6445 			if (sc->sc_type == WM_T_82575) {
   6446 				mask = 0;
   6447 				for (i = 0; i < sc->sc_nqueues; i++) {
   6448 					wmq = &sc->sc_queue[i];
   6449 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6450 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6451 				}
   6452 				mask |= EITR_OTHER;
   6453 			} else {
   6454 				mask = 0;
   6455 				for (i = 0; i < sc->sc_nqueues; i++) {
   6456 					wmq = &sc->sc_queue[i];
   6457 					mask |= 1 << wmq->wmq_intr_idx;
   6458 				}
   6459 				mask |= 1 << sc->sc_link_intr_idx;
   6460 			}
   6461 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6462 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6463 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6464 
   6465 			/* For other interrupts */
   6466 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6467 			break;
   6468 		}
   6469 	} else {
   6470 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6471 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6472 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6473 	}
   6474 
   6475 	/* Set up the inter-packet gap. */
   6476 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6477 
   6478 	if (sc->sc_type >= WM_T_82543) {
   6479 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6480 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6481 			wm_itrs_writereg(sc, wmq);
   6482 		}
   6483 		/*
    6484 		 * Link interrupts occur much less frequently than TX and
    6485 		 * RX interrupts, so we don't tune the
    6486 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6487 		 * if_igb does.
   6488 		 */
   6489 	}
   6490 
   6491 	/* Set the VLAN ethernetype. */
   6492 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6493 
   6494 	/*
   6495 	 * Set up the transmit control register; we start out with
    6496 	 * a collision distance suitable for FDX, but update it when
   6497 	 * we resolve the media type.
   6498 	 */
   6499 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6500 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6501 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6502 	if (sc->sc_type >= WM_T_82571)
   6503 		sc->sc_tctl |= TCTL_MULR;
   6504 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6505 
   6506 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6507 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6508 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6509 	}
   6510 
   6511 	if (sc->sc_type == WM_T_80003) {
   6512 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6513 		reg &= ~TCTL_EXT_GCEX_MASK;
   6514 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6515 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6516 	}
   6517 
   6518 	/* Set the media. */
   6519 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6520 		goto out;
   6521 
   6522 	/* Configure for OS presence */
   6523 	wm_init_manageability(sc);
   6524 
   6525 	/*
   6526 	 * Set up the receive control register; we actually program the
   6527 	 * register when we set the receive filter. Use multicast address
   6528 	 * offset type 0.
   6529 	 *
   6530 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6531 	 * don't enable that feature.
   6532 	 */
   6533 	sc->sc_mchash_type = 0;
   6534 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6535 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6536 
    6537 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6538 	if (sc->sc_type == WM_T_82574)
   6539 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6540 
   6541 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6542 		sc->sc_rctl |= RCTL_SECRC;
   6543 
   6544 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6545 	    && (ifp->if_mtu > ETHERMTU)) {
   6546 		sc->sc_rctl |= RCTL_LPE;
   6547 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6548 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6549 	}
   6550 
   6551 	if (MCLBYTES == 2048)
   6552 		sc->sc_rctl |= RCTL_2k;
   6553 	else {
   6554 		if (sc->sc_type >= WM_T_82543) {
   6555 			switch (MCLBYTES) {
   6556 			case 4096:
   6557 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6558 				break;
   6559 			case 8192:
   6560 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6561 				break;
   6562 			case 16384:
   6563 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6564 				break;
   6565 			default:
   6566 				panic("wm_init: MCLBYTES %d unsupported",
   6567 				    MCLBYTES);
   6568 				break;
   6569 			}
   6570 		} else
   6571 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6572 	}
   6573 
   6574 	/* Enable ECC */
   6575 	switch (sc->sc_type) {
   6576 	case WM_T_82571:
   6577 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6578 		reg |= PBA_ECC_CORR_EN;
   6579 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6580 		break;
   6581 	case WM_T_PCH_LPT:
   6582 	case WM_T_PCH_SPT:
   6583 	case WM_T_PCH_CNP:
   6584 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6585 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6586 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6587 
   6588 		sc->sc_ctrl |= CTRL_MEHE;
   6589 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6590 		break;
   6591 	default:
   6592 		break;
   6593 	}
   6594 
   6595 	/*
   6596 	 * Set the receive filter.
   6597 	 *
   6598 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6599 	 * the setting of RCTL.EN in wm_set_filter()
   6600 	 */
   6601 	wm_set_filter(sc);
   6602 
    6603 	/* On 82575 and later, set RDT only if RX is enabled */
   6604 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6605 		int qidx;
   6606 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6607 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6608 			for (i = 0; i < WM_NRXDESC; i++) {
   6609 				mutex_enter(rxq->rxq_lock);
   6610 				wm_init_rxdesc(rxq, i);
   6611 				mutex_exit(rxq->rxq_lock);
   6612 
   6613 			}
   6614 		}
   6615 	}
   6616 
   6617 	wm_unset_stopping_flags(sc);
   6618 
   6619 	/* Start the one second link check clock. */
   6620 	callout_schedule(&sc->sc_tick_ch, hz);
   6621 
   6622 	/* ...all done! */
   6623 	ifp->if_flags |= IFF_RUNNING;
   6624 
   6625  out:
   6626 	/* Save last flags for the callback */
   6627 	sc->sc_if_flags = ifp->if_flags;
   6628 	sc->sc_ec_capenable = ec->ec_capenable;
   6629 	if (error)
   6630 		log(LOG_ERR, "%s: interface not running\n",
   6631 		    device_xname(sc->sc_dev));
   6632 	return error;
   6633 }
   6634 
   6635 /*
   6636  * wm_stop:		[ifnet interface function]
   6637  *
   6638  *	Stop transmission on the interface.
   6639  */
   6640 static void
   6641 wm_stop(struct ifnet *ifp, int disable)
   6642 {
   6643 	struct wm_softc *sc = ifp->if_softc;
   6644 
   6645 	ASSERT_SLEEPABLE();
   6646 
   6647 	WM_CORE_LOCK(sc);
   6648 	wm_stop_locked(ifp, disable ? true : false, true);
   6649 	WM_CORE_UNLOCK(sc);
   6650 
   6651 	/*
    6652 	 * After wm_set_stopping_flags(), it is guaranteed that
    6653 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6654 	 * However, workqueue_wait() cannot be called in
    6655 	 * wm_stop_locked() because it can sleep, so call
    6656 	 * workqueue_wait() here.
   6657 	 */
   6658 	for (int i = 0; i < sc->sc_nqueues; i++)
   6659 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6660 }
   6661 
   6662 static void
   6663 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6664 {
   6665 	struct wm_softc *sc = ifp->if_softc;
   6666 	struct wm_txsoft *txs;
   6667 	int i, qidx;
   6668 
   6669 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6670 		device_xname(sc->sc_dev), __func__));
   6671 	KASSERT(WM_CORE_LOCKED(sc));
   6672 
   6673 	wm_set_stopping_flags(sc);
   6674 
   6675 	if (sc->sc_flags & WM_F_HAS_MII) {
   6676 		/* Down the MII. */
   6677 		mii_down(&sc->sc_mii);
   6678 	} else {
   6679 #if 0
   6680 		/* Should we clear PHY's status properly? */
   6681 		wm_reset(sc);
   6682 #endif
   6683 	}
   6684 
   6685 	/* Stop the transmit and receive processes. */
   6686 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6687 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6688 	sc->sc_rctl &= ~RCTL_EN;
   6689 
   6690 	/*
   6691 	 * Clear the interrupt mask to ensure the device cannot assert its
   6692 	 * interrupt line.
   6693 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6694 	 * service any currently pending or shared interrupt.
   6695 	 */
   6696 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6697 	sc->sc_icr = 0;
   6698 	if (wm_is_using_msix(sc)) {
   6699 		if (sc->sc_type != WM_T_82574) {
   6700 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6701 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6702 		} else
   6703 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6704 	}
   6705 
   6706 	/*
   6707 	 * Stop callouts after interrupts are disabled; if we have
   6708 	 * to wait for them, we will be releasing the CORE_LOCK
   6709 	 * briefly, which will unblock interrupts on the current CPU.
   6710 	 */
   6711 
   6712 	/* Stop the one second clock. */
   6713 	if (wait)
   6714 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6715 	else
   6716 		callout_stop(&sc->sc_tick_ch);
   6717 
   6718 	/* Stop the 82547 Tx FIFO stall check timer. */
   6719 	if (sc->sc_type == WM_T_82547) {
   6720 		if (wait)
   6721 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6722 		else
   6723 			callout_stop(&sc->sc_txfifo_ch);
   6724 	}
   6725 
   6726 	/* Release any queued transmit buffers. */
   6727 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6728 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6729 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6730 		struct mbuf *m;
   6731 
   6732 		mutex_enter(txq->txq_lock);
   6733 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6734 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6735 			txs = &txq->txq_soft[i];
   6736 			if (txs->txs_mbuf != NULL) {
    6737 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6738 				m_freem(txs->txs_mbuf);
   6739 				txs->txs_mbuf = NULL;
   6740 			}
   6741 		}
   6742 		/* Drain txq_interq */
   6743 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6744 			m_freem(m);
   6745 		mutex_exit(txq->txq_lock);
   6746 	}
   6747 
   6748 	/* Mark the interface as down and cancel the watchdog timer. */
   6749 	ifp->if_flags &= ~IFF_RUNNING;
   6750 
   6751 	if (disable) {
   6752 		for (i = 0; i < sc->sc_nqueues; i++) {
   6753 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6754 			mutex_enter(rxq->rxq_lock);
   6755 			wm_rxdrain(rxq);
   6756 			mutex_exit(rxq->rxq_lock);
   6757 		}
   6758 	}
   6759 
   6760 #if 0 /* notyet */
   6761 	if (sc->sc_type >= WM_T_82544)
   6762 		CSR_WRITE(sc, WMREG_WUC, 0);
   6763 #endif
   6764 }
   6765 
   6766 static void
   6767 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6768 {
   6769 	struct mbuf *m;
   6770 	int i;
   6771 
   6772 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6773 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6774 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6775 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6776 		    m->m_data, m->m_len, m->m_flags);
   6777 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6778 	    i, i == 1 ? "" : "s");
   6779 }
   6780 
   6781 /*
   6782  * wm_82547_txfifo_stall:
   6783  *
   6784  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6785  *	reset the FIFO pointers, and restart packet transmission.
   6786  */
   6787 static void
   6788 wm_82547_txfifo_stall(void *arg)
   6789 {
   6790 	struct wm_softc *sc = arg;
   6791 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6792 
   6793 	mutex_enter(txq->txq_lock);
   6794 
   6795 	if (txq->txq_stopping)
   6796 		goto out;
   6797 
   6798 	if (txq->txq_fifo_stall) {
   6799 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6800 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6801 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6802 			/*
   6803 			 * Packets have drained.  Stop transmitter, reset
   6804 			 * FIFO pointers, restart transmitter, and kick
   6805 			 * the packet queue.
   6806 			 */
   6807 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6808 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6809 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6810 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6811 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6812 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6813 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6814 			CSR_WRITE_FLUSH(sc);
   6815 
   6816 			txq->txq_fifo_head = 0;
   6817 			txq->txq_fifo_stall = 0;
   6818 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6819 		} else {
   6820 			/*
   6821 			 * Still waiting for packets to drain; try again in
   6822 			 * another tick.
   6823 			 */
   6824 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6825 		}
   6826 	}
   6827 
   6828 out:
   6829 	mutex_exit(txq->txq_lock);
   6830 }
   6831 
   6832 /*
   6833  * wm_82547_txfifo_bugchk:
   6834  *
    6835  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    6836  *	prevent enqueueing a packet that would wrap around the end
    6837  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6838  *
   6839  *	We do this by checking the amount of space before the end
   6840  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6841  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6842  *	the internal FIFO pointers to the beginning, and restart
   6843  *	transmission on the interface.
   6844  */
   6845 #define	WM_FIFO_HDR		0x10
   6846 #define	WM_82547_PAD_LEN	0x3e0
   6847 static int
   6848 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6849 {
   6850 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6851 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6852 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6853 
   6854 	/* Just return if already stalled. */
   6855 	if (txq->txq_fifo_stall)
   6856 		return 1;
   6857 
   6858 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6859 		/* Stall only occurs in half-duplex mode. */
   6860 		goto send_packet;
   6861 	}
   6862 
   6863 	if (len >= WM_82547_PAD_LEN + space) {
   6864 		txq->txq_fifo_stall = 1;
   6865 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6866 		return 1;
   6867 	}
   6868 
   6869  send_packet:
   6870 	txq->txq_fifo_head += len;
   6871 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6872 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6873 
   6874 	return 0;
   6875 }
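/*
 * Worked example of the check above (a sketch, with made-up numbers):
 * lengths are rounded up to WM_FIFO_HDR (16-byte) units plus one header,
 * so a 1518-byte frame becomes len = roundup(1518 + 16, 16) = 1536.  If
 * only space = 512 bytes remain before the end of the FIFO, then
 * len >= WM_82547_PAD_LEN + space (1536 >= 992 + 512) holds and the
 * queue is stalled until the FIFO drains; otherwise the packet is sent
 * and txq_fifo_head advances by len, wrapping modulo txq_fifo_size.
 */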
   6876 
   6877 static int
   6878 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6879 {
   6880 	int error;
   6881 
   6882 	/*
   6883 	 * Allocate the control data structures, and create and load the
   6884 	 * DMA map for it.
   6885 	 *
   6886 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6887 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6888 	 * both sets within the same 4G segment.
   6889 	 */
   6890 	if (sc->sc_type < WM_T_82544)
   6891 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6892 	else
   6893 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6894 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6895 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6896 	else
   6897 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6898 
   6899 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6900 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6901 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6902 		aprint_error_dev(sc->sc_dev,
   6903 		    "unable to allocate TX control data, error = %d\n",
   6904 		    error);
   6905 		goto fail_0;
   6906 	}
   6907 
   6908 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6909 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6910 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6911 		aprint_error_dev(sc->sc_dev,
   6912 		    "unable to map TX control data, error = %d\n", error);
   6913 		goto fail_1;
   6914 	}
   6915 
   6916 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6917 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6918 		aprint_error_dev(sc->sc_dev,
   6919 		    "unable to create TX control data DMA map, error = %d\n",
   6920 		    error);
   6921 		goto fail_2;
   6922 	}
   6923 
   6924 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6925 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6926 		aprint_error_dev(sc->sc_dev,
   6927 		    "unable to load TX control data DMA map, error = %d\n",
   6928 		    error);
   6929 		goto fail_3;
   6930 	}
   6931 
   6932 	return 0;
   6933 
   6934  fail_3:
   6935 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6936  fail_2:
   6937 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6938 	    WM_TXDESCS_SIZE(txq));
   6939  fail_1:
   6940 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6941  fail_0:
   6942 	return error;
   6943 }
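/*
 * A note on the 4G constraint above: bus_dmamem_alloc() is called with a
 * boundary argument of 0x100000000, so the descriptor ring never crosses
 * a 4 GB boundary.  That keeps the high 32 bits of the ring's bus address
 * (written once to TDBAH/RDBAH) valid for every descriptor in the ring.
 */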
   6944 
   6945 static void
   6946 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6947 {
   6948 
   6949 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6950 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6951 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6952 	    WM_TXDESCS_SIZE(txq));
   6953 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6954 }
   6955 
   6956 static int
   6957 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6958 {
   6959 	int error;
   6960 	size_t rxq_descs_size;
   6961 
   6962 	/*
   6963 	 * Allocate the control data structures, and create and load the
   6964 	 * DMA map for it.
   6965 	 *
   6966 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6967 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6968 	 * both sets within the same 4G segment.
   6969 	 */
   6970 	rxq->rxq_ndesc = WM_NRXDESC;
   6971 	if (sc->sc_type == WM_T_82574)
   6972 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6973 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6974 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6975 	else
   6976 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6977 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6978 
   6979 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6980 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6981 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6982 		aprint_error_dev(sc->sc_dev,
   6983 		    "unable to allocate RX control data, error = %d\n",
   6984 		    error);
   6985 		goto fail_0;
   6986 	}
   6987 
   6988 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6989 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6990 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6991 		aprint_error_dev(sc->sc_dev,
   6992 		    "unable to map RX control data, error = %d\n", error);
   6993 		goto fail_1;
   6994 	}
   6995 
   6996 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6997 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6998 		aprint_error_dev(sc->sc_dev,
   6999 		    "unable to create RX control data DMA map, error = %d\n",
   7000 		    error);
   7001 		goto fail_2;
   7002 	}
   7003 
   7004 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7005 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7006 		aprint_error_dev(sc->sc_dev,
   7007 		    "unable to load RX control data DMA map, error = %d\n",
   7008 		    error);
   7009 		goto fail_3;
   7010 	}
   7011 
   7012 	return 0;
   7013 
   7014  fail_3:
   7015 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7016  fail_2:
   7017 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7018 	    rxq_descs_size);
   7019  fail_1:
   7020 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7021  fail_0:
   7022 	return error;
   7023 }
   7024 
   7025 static void
   7026 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7027 {
   7028 
   7029 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7030 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7031 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7032 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7033 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7034 }
   7035 
   7036 
   7037 static int
   7038 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7039 {
   7040 	int i, error;
   7041 
   7042 	/* Create the transmit buffer DMA maps. */
   7043 	WM_TXQUEUELEN(txq) =
   7044 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7045 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7046 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7047 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7048 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7049 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7050 			aprint_error_dev(sc->sc_dev,
   7051 			    "unable to create Tx DMA map %d, error = %d\n",
   7052 			    i, error);
   7053 			goto fail;
   7054 		}
   7055 	}
   7056 
   7057 	return 0;
   7058 
   7059  fail:
   7060 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7061 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7062 			bus_dmamap_destroy(sc->sc_dmat,
   7063 			    txq->txq_soft[i].txs_dmamap);
   7064 	}
   7065 	return error;
   7066 }
   7067 
   7068 static void
   7069 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7070 {
   7071 	int i;
   7072 
   7073 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7074 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7075 			bus_dmamap_destroy(sc->sc_dmat,
   7076 			    txq->txq_soft[i].txs_dmamap);
   7077 	}
   7078 }
   7079 
   7080 static int
   7081 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7082 {
   7083 	int i, error;
   7084 
   7085 	/* Create the receive buffer DMA maps. */
   7086 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7087 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7088 			    MCLBYTES, 0, 0,
   7089 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7090 			aprint_error_dev(sc->sc_dev,
   7091 			    "unable to create Rx DMA map %d error = %d\n",
   7092 			    i, error);
   7093 			goto fail;
   7094 		}
   7095 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7096 	}
   7097 
   7098 	return 0;
   7099 
   7100  fail:
   7101 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7102 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7103 			bus_dmamap_destroy(sc->sc_dmat,
   7104 			    rxq->rxq_soft[i].rxs_dmamap);
   7105 	}
   7106 	return error;
   7107 }
   7108 
   7109 static void
   7110 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7111 {
   7112 	int i;
   7113 
   7114 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7115 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7116 			bus_dmamap_destroy(sc->sc_dmat,
   7117 			    rxq->rxq_soft[i].rxs_dmamap);
   7118 	}
   7119 }
   7120 
   7121 /*
    7122  * wm_alloc_txrx_queues:
    7123  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   7124  */
   7125 static int
   7126 wm_alloc_txrx_queues(struct wm_softc *sc)
   7127 {
   7128 	int i, error, tx_done, rx_done;
   7129 
   7130 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7131 	    KM_SLEEP);
   7132 	if (sc->sc_queue == NULL) {
    7133 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7134 		error = ENOMEM;
   7135 		goto fail_0;
   7136 	}
   7137 
   7138 	/* For transmission */
   7139 	error = 0;
   7140 	tx_done = 0;
   7141 	for (i = 0; i < sc->sc_nqueues; i++) {
   7142 #ifdef WM_EVENT_COUNTERS
   7143 		int j;
   7144 		const char *xname;
   7145 #endif
   7146 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7147 		txq->txq_sc = sc;
   7148 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7149 
   7150 		error = wm_alloc_tx_descs(sc, txq);
   7151 		if (error)
   7152 			break;
   7153 		error = wm_alloc_tx_buffer(sc, txq);
   7154 		if (error) {
   7155 			wm_free_tx_descs(sc, txq);
   7156 			break;
   7157 		}
   7158 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7159 		if (txq->txq_interq == NULL) {
   7160 			wm_free_tx_descs(sc, txq);
   7161 			wm_free_tx_buffer(sc, txq);
   7162 			error = ENOMEM;
   7163 			break;
   7164 		}
   7165 
   7166 #ifdef WM_EVENT_COUNTERS
   7167 		xname = device_xname(sc->sc_dev);
   7168 
   7169 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7170 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7171 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7172 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7173 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7174 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7175 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7176 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7177 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7178 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7179 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7180 
   7181 		for (j = 0; j < WM_NTXSEGS; j++) {
   7182 			snprintf(txq->txq_txseg_evcnt_names[j],
   7183 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7184 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7185 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7186 		}
   7187 
   7188 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7189 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7190 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7191 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7192 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7193 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7194 #endif /* WM_EVENT_COUNTERS */
   7195 
   7196 		tx_done++;
   7197 	}
   7198 	if (error)
   7199 		goto fail_1;
   7200 
   7201 	/* For receive */
   7202 	error = 0;
   7203 	rx_done = 0;
   7204 	for (i = 0; i < sc->sc_nqueues; i++) {
   7205 #ifdef WM_EVENT_COUNTERS
   7206 		const char *xname;
   7207 #endif
   7208 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7209 		rxq->rxq_sc = sc;
   7210 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7211 
   7212 		error = wm_alloc_rx_descs(sc, rxq);
   7213 		if (error)
   7214 			break;
   7215 
   7216 		error = wm_alloc_rx_buffer(sc, rxq);
   7217 		if (error) {
   7218 			wm_free_rx_descs(sc, rxq);
   7219 			break;
   7220 		}
   7221 
   7222 #ifdef WM_EVENT_COUNTERS
   7223 		xname = device_xname(sc->sc_dev);
   7224 
   7225 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7226 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7227 
   7228 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7229 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7230 #endif /* WM_EVENT_COUNTERS */
   7231 
   7232 		rx_done++;
   7233 	}
   7234 	if (error)
   7235 		goto fail_2;
   7236 
   7237 	return 0;
   7238 
   7239  fail_2:
   7240 	for (i = 0; i < rx_done; i++) {
   7241 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7242 		wm_free_rx_buffer(sc, rxq);
   7243 		wm_free_rx_descs(sc, rxq);
   7244 		if (rxq->rxq_lock)
   7245 			mutex_obj_free(rxq->rxq_lock);
   7246 	}
   7247  fail_1:
   7248 	for (i = 0; i < tx_done; i++) {
   7249 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7250 		pcq_destroy(txq->txq_interq);
   7251 		wm_free_tx_buffer(sc, txq);
   7252 		wm_free_tx_descs(sc, txq);
   7253 		if (txq->txq_lock)
   7254 			mutex_obj_free(txq->txq_lock);
   7255 	}
   7256 
   7257 	kmem_free(sc->sc_queue,
   7258 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7259  fail_0:
   7260 	return error;
   7261 }
   7262 
   7263 /*
    7264  * wm_free_txrx_queues:
    7265  *	Free {tx,rx} descriptors and {tx,rx} buffers
   7266  */
   7267 static void
   7268 wm_free_txrx_queues(struct wm_softc *sc)
   7269 {
   7270 	int i;
   7271 
   7272 	for (i = 0; i < sc->sc_nqueues; i++) {
   7273 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7274 
   7275 #ifdef WM_EVENT_COUNTERS
   7276 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7277 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7278 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7279 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7280 #endif /* WM_EVENT_COUNTERS */
   7281 
   7282 		wm_free_rx_buffer(sc, rxq);
   7283 		wm_free_rx_descs(sc, rxq);
   7284 		if (rxq->rxq_lock)
   7285 			mutex_obj_free(rxq->rxq_lock);
   7286 	}
   7287 
   7288 	for (i = 0; i < sc->sc_nqueues; i++) {
   7289 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7290 		struct mbuf *m;
   7291 #ifdef WM_EVENT_COUNTERS
   7292 		int j;
   7293 
   7294 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7295 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7296 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7297 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7298 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7299 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7300 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7301 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7302 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7303 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7304 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7305 
   7306 		for (j = 0; j < WM_NTXSEGS; j++)
   7307 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7308 
   7309 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7310 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7311 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7312 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7313 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7314 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7315 #endif /* WM_EVENT_COUNTERS */
   7316 
   7317 		/* Drain txq_interq */
   7318 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7319 			m_freem(m);
   7320 		pcq_destroy(txq->txq_interq);
   7321 
   7322 		wm_free_tx_buffer(sc, txq);
   7323 		wm_free_tx_descs(sc, txq);
   7324 		if (txq->txq_lock)
   7325 			mutex_obj_free(txq->txq_lock);
   7326 	}
   7327 
   7328 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7329 }
   7330 
   7331 static void
   7332 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7333 {
   7334 
   7335 	KASSERT(mutex_owned(txq->txq_lock));
   7336 
   7337 	/* Initialize the transmit descriptor ring. */
   7338 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7339 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7340 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7341 	txq->txq_free = WM_NTXDESC(txq);
   7342 	txq->txq_next = 0;
   7343 }
   7344 
   7345 static void
   7346 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7347     struct wm_txqueue *txq)
   7348 {
   7349 
   7350 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7351 		device_xname(sc->sc_dev), __func__));
   7352 	KASSERT(mutex_owned(txq->txq_lock));
   7353 
   7354 	if (sc->sc_type < WM_T_82543) {
   7355 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7356 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7357 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7358 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7359 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7360 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7361 	} else {
   7362 		int qid = wmq->wmq_id;
   7363 
   7364 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7365 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7366 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7367 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7368 
   7369 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7370 			/*
   7371 			 * Don't write TDT before TCTL.EN is set.
    7372 			 * See the documentation.
   7373 			 */
   7374 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7375 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7376 			    | TXDCTL_WTHRESH(0));
   7377 		else {
   7378 			/* XXX should update with AIM? */
   7379 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7380 			if (sc->sc_type >= WM_T_82540) {
   7381 				/* Should be the same */
   7382 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7383 			}
   7384 
   7385 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7386 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7387 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7388 		}
   7389 	}
   7390 }
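/*
 * Worked example of the "/ 4" above (a sketch, not chip documentation):
 * wmq_itr is kept in 256ns ITR units, while the TIDV/TADV timers count
 * in 1024ns units, so dividing by 4 preserves the interval.  E.g.
 * wmq_itr = 1500 (1500 * 256ns = 384us) programs TIDV = TADV = 375
 * (375 * 1024ns = 384us).
 */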
   7391 
   7392 static void
   7393 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7394 {
   7395 	int i;
   7396 
   7397 	KASSERT(mutex_owned(txq->txq_lock));
   7398 
   7399 	/* Initialize the transmit job descriptors. */
   7400 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7401 		txq->txq_soft[i].txs_mbuf = NULL;
   7402 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7403 	txq->txq_snext = 0;
   7404 	txq->txq_sdirty = 0;
   7405 }
   7406 
   7407 static void
   7408 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7409     struct wm_txqueue *txq)
   7410 {
   7411 
   7412 	KASSERT(mutex_owned(txq->txq_lock));
   7413 
   7414 	/*
   7415 	 * Set up some register offsets that are different between
   7416 	 * the i82542 and the i82543 and later chips.
   7417 	 */
   7418 	if (sc->sc_type < WM_T_82543)
   7419 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7420 	else
   7421 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7422 
   7423 	wm_init_tx_descs(sc, txq);
   7424 	wm_init_tx_regs(sc, wmq, txq);
   7425 	wm_init_tx_buffer(sc, txq);
   7426 
   7427 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7428 	txq->txq_sending = false;
   7429 }
   7430 
   7431 static void
   7432 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7433     struct wm_rxqueue *rxq)
   7434 {
   7435 
   7436 	KASSERT(mutex_owned(rxq->rxq_lock));
   7437 
   7438 	/*
   7439 	 * Initialize the receive descriptor and receive job
   7440 	 * descriptor rings.
   7441 	 */
   7442 	if (sc->sc_type < WM_T_82543) {
   7443 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7444 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7445 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7446 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7447 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7448 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7449 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7450 
   7451 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7452 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7453 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7454 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7455 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7456 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7457 	} else {
   7458 		int qid = wmq->wmq_id;
   7459 
   7460 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7461 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7462 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7463 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7464 
   7465 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7466 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7467 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7468 
    7469 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7470 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7471 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7472 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7473 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7474 			    | RXDCTL_WTHRESH(1));
   7475 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7476 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7477 		} else {
   7478 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7479 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7480 			/* XXX should update with AIM? */
   7481 			CSR_WRITE(sc, WMREG_RDTR,
   7482 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7483 			/* MUST be same */
   7484 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7485 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7486 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7487 		}
   7488 	}
   7489 }
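/*
 * A note on the SRRCTL arithmetic above (assuming SRRCTL_BSIZEPKT_SHIFT
 * is 10, i.e. the packet buffer size field is in 1 KB units): the panic
 * check rejects any MCLBYTES that is not a multiple of that unit, and
 * MCLBYTES = 2048 programs a buffer size field of 2048 >> 10 = 2.
 */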
   7490 
   7491 static int
   7492 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7493 {
   7494 	struct wm_rxsoft *rxs;
   7495 	int error, i;
   7496 
   7497 	KASSERT(mutex_owned(rxq->rxq_lock));
   7498 
   7499 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7500 		rxs = &rxq->rxq_soft[i];
   7501 		if (rxs->rxs_mbuf == NULL) {
   7502 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7503 				log(LOG_ERR, "%s: unable to allocate or map "
   7504 				    "rx buffer %d, error = %d\n",
   7505 				    device_xname(sc->sc_dev), i, error);
   7506 				/*
   7507 				 * XXX Should attempt to run with fewer receive
   7508 				 * XXX buffers instead of just failing.
   7509 				 */
   7510 				wm_rxdrain(rxq);
   7511 				return ENOMEM;
   7512 			}
   7513 		} else {
   7514 			/*
   7515 			 * For 82575 and 82576, the RX descriptors must be
   7516 			 * initialized after the setting of RCTL.EN in
   7517 			 * wm_set_filter()
   7518 			 */
   7519 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7520 				wm_init_rxdesc(rxq, i);
   7521 		}
   7522 	}
   7523 	rxq->rxq_ptr = 0;
   7524 	rxq->rxq_discard = 0;
   7525 	WM_RXCHAIN_RESET(rxq);
   7526 
   7527 	return 0;
   7528 }
   7529 
   7530 static int
   7531 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7532     struct wm_rxqueue *rxq)
   7533 {
   7534 
   7535 	KASSERT(mutex_owned(rxq->rxq_lock));
   7536 
   7537 	/*
   7538 	 * Set up some register offsets that are different between
   7539 	 * the i82542 and the i82543 and later chips.
   7540 	 */
   7541 	if (sc->sc_type < WM_T_82543)
   7542 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7543 	else
   7544 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7545 
   7546 	wm_init_rx_regs(sc, wmq, rxq);
   7547 	return wm_init_rx_buffer(sc, rxq);
   7548 }
   7549 
   7550 /*
    7551  * wm_init_txrx_queues:
    7552  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   7553  */
   7554 static int
   7555 wm_init_txrx_queues(struct wm_softc *sc)
   7556 {
   7557 	int i, error = 0;
   7558 
   7559 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7560 		device_xname(sc->sc_dev), __func__));
   7561 
   7562 	for (i = 0; i < sc->sc_nqueues; i++) {
   7563 		struct wm_queue *wmq = &sc->sc_queue[i];
   7564 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7565 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7566 
   7567 		/*
    7568 		 * TODO
    7569 		 * Currently, we use a constant value instead of AIM.
    7570 		 * Furthermore, the interrupt interval for multiqueue,
    7571 		 * which uses polling mode, is less than the default value.
    7572 		 * More tuning and AIM support are required.
   7573 		 */
   7574 		if (wm_is_using_multiqueue(sc))
   7575 			wmq->wmq_itr = 50;
   7576 		else
   7577 			wmq->wmq_itr = sc->sc_itr_init;
   7578 		wmq->wmq_set_itr = true;
   7579 
   7580 		mutex_enter(txq->txq_lock);
   7581 		wm_init_tx_queue(sc, wmq, txq);
   7582 		mutex_exit(txq->txq_lock);
   7583 
   7584 		mutex_enter(rxq->rxq_lock);
   7585 		error = wm_init_rx_queue(sc, wmq, rxq);
   7586 		mutex_exit(rxq->rxq_lock);
   7587 		if (error)
   7588 			break;
   7589 	}
   7590 
   7591 	return error;
   7592 }
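/*
 * Plugging the multiqueue value above into the driver's EITR formula
 * from wm_init() (a sketch): wmq_itr = 50 corresponds to roughly
 * 1,000,000 / 50 = 20,000 interrupts/sec per queue, a much shorter
 * interval than the non-multiqueue sc_itr_init = 450 (~2,222/sec).
 */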
   7593 
   7594 /*
   7595  * wm_tx_offload:
   7596  *
   7597  *	Set up TCP/IP checksumming parameters for the
   7598  *	specified packet.
   7599  */
   7600 static void
   7601 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7602     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7603 {
   7604 	struct mbuf *m0 = txs->txs_mbuf;
   7605 	struct livengood_tcpip_ctxdesc *t;
   7606 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7607 	uint32_t ipcse;
   7608 	struct ether_header *eh;
   7609 	int offset, iphl;
   7610 	uint8_t fields;
   7611 
   7612 	/*
   7613 	 * XXX It would be nice if the mbuf pkthdr had offset
   7614 	 * fields for the protocol headers.
   7615 	 */
   7616 
   7617 	eh = mtod(m0, struct ether_header *);
   7618 	switch (htons(eh->ether_type)) {
   7619 	case ETHERTYPE_IP:
   7620 	case ETHERTYPE_IPV6:
   7621 		offset = ETHER_HDR_LEN;
   7622 		break;
   7623 
   7624 	case ETHERTYPE_VLAN:
   7625 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7626 		break;
   7627 
   7628 	default:
   7629 		/* Don't support this protocol or encapsulation. */
   7630 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7631 		txq->txq_last_hw_ipcs = 0;
   7632 		txq->txq_last_hw_tucs = 0;
   7633 		*fieldsp = 0;
   7634 		*cmdp = 0;
   7635 		return;
   7636 	}
   7637 
   7638 	if ((m0->m_pkthdr.csum_flags &
   7639 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7640 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7641 	} else
   7642 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7643 
   7644 	ipcse = offset + iphl - 1;
   7645 
   7646 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7647 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7648 	seg = 0;
   7649 	fields = 0;
   7650 
   7651 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7652 		int hlen = offset + iphl;
   7653 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7654 
   7655 		if (__predict_false(m0->m_len <
   7656 				    (hlen + sizeof(struct tcphdr)))) {
   7657 			/*
   7658 			 * TCP/IP headers are not in the first mbuf; we need
   7659 			 * to do this the slow and painful way. Let's just
   7660 			 * hope this doesn't happen very often.
   7661 			 */
   7662 			struct tcphdr th;
   7663 
   7664 			WM_Q_EVCNT_INCR(txq, tsopain);
   7665 
   7666 			m_copydata(m0, hlen, sizeof(th), &th);
   7667 			if (v4) {
   7668 				struct ip ip;
   7669 
   7670 				m_copydata(m0, offset, sizeof(ip), &ip);
   7671 				ip.ip_len = 0;
   7672 				m_copyback(m0,
   7673 				    offset + offsetof(struct ip, ip_len),
   7674 				    sizeof(ip.ip_len), &ip.ip_len);
   7675 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7676 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7677 			} else {
   7678 				struct ip6_hdr ip6;
   7679 
   7680 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7681 				ip6.ip6_plen = 0;
   7682 				m_copyback(m0,
   7683 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7684 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7685 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7686 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7687 			}
   7688 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7689 			    sizeof(th.th_sum), &th.th_sum);
   7690 
   7691 			hlen += th.th_off << 2;
   7692 		} else {
   7693 			/*
   7694 			 * TCP/IP headers are in the first mbuf; we can do
   7695 			 * this the easy way.
   7696 			 */
   7697 			struct tcphdr *th;
   7698 
   7699 			if (v4) {
   7700 				struct ip *ip =
   7701 				    (void *)(mtod(m0, char *) + offset);
   7702 				th = (void *)(mtod(m0, char *) + hlen);
   7703 
   7704 				ip->ip_len = 0;
   7705 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7706 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7707 			} else {
   7708 				struct ip6_hdr *ip6 =
   7709 				    (void *)(mtod(m0, char *) + offset);
   7710 				th = (void *)(mtod(m0, char *) + hlen);
   7711 
   7712 				ip6->ip6_plen = 0;
   7713 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7714 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7715 			}
   7716 			hlen += th->th_off << 2;
   7717 		}
   7718 
   7719 		if (v4) {
   7720 			WM_Q_EVCNT_INCR(txq, tso);
   7721 			cmdlen |= WTX_TCPIP_CMD_IP;
   7722 		} else {
   7723 			WM_Q_EVCNT_INCR(txq, tso6);
   7724 			ipcse = 0;
   7725 		}
   7726 		cmd |= WTX_TCPIP_CMD_TSE;
   7727 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7728 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7729 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7730 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7731 	}
   7732 
   7733 	/*
   7734 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7735 	 * offload feature, if we load the context descriptor, we
   7736 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7737 	 */
   7738 
   7739 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7740 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7741 	    WTX_TCPIP_IPCSE(ipcse);
   7742 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7743 		WM_Q_EVCNT_INCR(txq, ipsum);
   7744 		fields |= WTX_IXSM;
   7745 	}
   7746 
   7747 	offset += iphl;
   7748 
   7749 	if (m0->m_pkthdr.csum_flags &
   7750 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7751 		WM_Q_EVCNT_INCR(txq, tusum);
   7752 		fields |= WTX_TXSM;
   7753 		tucs = WTX_TCPIP_TUCSS(offset) |
   7754 		    WTX_TCPIP_TUCSO(offset +
   7755 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7756 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7757 	} else if ((m0->m_pkthdr.csum_flags &
   7758 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7759 		WM_Q_EVCNT_INCR(txq, tusum6);
   7760 		fields |= WTX_TXSM;
   7761 		tucs = WTX_TCPIP_TUCSS(offset) |
   7762 		    WTX_TCPIP_TUCSO(offset +
   7763 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7764 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7765 	} else {
   7766 		/* Just initialize it to a valid TCP context. */
   7767 		tucs = WTX_TCPIP_TUCSS(offset) |
   7768 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7769 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7770 	}
   7771 
   7772 	*cmdp = cmd;
   7773 	*fieldsp = fields;
   7774 
   7775 	/*
    7776 	 * We don't have to write a context descriptor for every
    7777 	 * packet, except on the 82574.  For the 82574, we must write
    7778 	 * a context descriptor for every packet when we use two
    7779 	 * descriptor queues.
    7780 	 *
    7781 	 * The 82574L can only remember the *last* context used,
    7782 	 * regardless of which queue it was used for.  We cannot reuse
    7783 	 * contexts on this hardware and must generate a new context
    7784 	 * every time.  See the 82574L spec, section 7.2.6, second note.
   7785 	 */
   7786 	if (sc->sc_nqueues < 2) {
    7787 		/*
    7788 		 * Setting up a new checksum offload context for every
    7789 		 * frame takes a lot of processing time in hardware.
    7790 		 * It also hurts performance for small frames, so avoid
    7791 		 * it if the driver can reuse the previously configured
    7792 		 * checksum offload context.
    7793 		 * For TSO, in theory we could reuse a TSO context only if
    7794 		 * the frame has the same type (IP/TCP) and the same MSS.
    7795 		 * However, checking whether a frame has the same IP/TCP
    7796 		 * structure is hard, so just ignore that and always
    7797 		 * establish a new TSO context.
    7798 		 */
   7799 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7800 		    == 0) {
   7801 			if (txq->txq_last_hw_cmd == cmd &&
   7802 			    txq->txq_last_hw_fields == fields &&
   7803 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7804 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7805 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7806 				return;
   7807 			}
   7808 		}
   7809 
   7810 		txq->txq_last_hw_cmd = cmd;
   7811 		txq->txq_last_hw_fields = fields;
   7812 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7813 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7814 	}
   7815 
   7816 	/* Fill in the context descriptor. */
   7817 	t = (struct livengood_tcpip_ctxdesc *)
   7818 	    &txq->txq_descs[txq->txq_next];
   7819 	t->tcpip_ipcs = htole32(ipcs);
   7820 	t->tcpip_tucs = htole32(tucs);
   7821 	t->tcpip_cmdlen = htole32(cmdlen);
   7822 	t->tcpip_seg = htole32(seg);
   7823 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7824 
   7825 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7826 	txs->txs_ndesc++;
   7827 }
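
         /*
          * A worked example of the TSO seg word computed in wm_tx_offload()
          * above, using hypothetical values: an untagged IPv4/TCP frame
          * with a 20-byte IP header and a 20-byte TCP header has
          * hlen = 14 + 20 + 20 = 54, so with an MSS of 1448 the seg word
          * is WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1448).
          */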
   7828 
   7829 static inline int
   7830 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7831 {
   7832 	struct wm_softc *sc = ifp->if_softc;
   7833 	u_int cpuid = cpu_index(curcpu());
   7834 
    7835 	/*
    7836 	 * Currently, a simple distribution strategy.
    7837 	 * TODO:
    7838 	 * distribute by flowid (the RSS hash value).
    7839 	 */
   7840 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7841 }
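
         /*
          * A worked example of the wm_select_txqueue() arithmetic above,
          * with hypothetical numbers: for ncpu = 8, sc_affinity_offset = 2,
          * sc_nqueues = 4 and a current CPU index of 1, the result is
          * ((1 + 8 - 2) % 8) % 4 = 3, so the packet goes to Tx queue 3.
          * Adding ncpu before the subtraction keeps the left operand of
          * the modulo non-negative.
          */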
   7842 
   7843 static inline bool
   7844 wm_linkdown_discard(struct wm_txqueue *txq)
   7845 {
   7846 
   7847 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7848 		return true;
   7849 
   7850 	return false;
   7851 }
   7852 
   7853 /*
   7854  * wm_start:		[ifnet interface function]
   7855  *
   7856  *	Start packet transmission on the interface.
   7857  */
   7858 static void
   7859 wm_start(struct ifnet *ifp)
   7860 {
   7861 	struct wm_softc *sc = ifp->if_softc;
   7862 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7863 
   7864 #ifdef WM_MPSAFE
   7865 	KASSERT(if_is_mpsafe(ifp));
   7866 #endif
   7867 	/*
   7868 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7869 	 */
   7870 
   7871 	mutex_enter(txq->txq_lock);
   7872 	if (!txq->txq_stopping)
   7873 		wm_start_locked(ifp);
   7874 	mutex_exit(txq->txq_lock);
   7875 }
   7876 
   7877 static void
   7878 wm_start_locked(struct ifnet *ifp)
   7879 {
   7880 	struct wm_softc *sc = ifp->if_softc;
   7881 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7882 
   7883 	wm_send_common_locked(ifp, txq, false);
   7884 }
   7885 
   7886 static int
   7887 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7888 {
   7889 	int qid;
   7890 	struct wm_softc *sc = ifp->if_softc;
   7891 	struct wm_txqueue *txq;
   7892 
   7893 	qid = wm_select_txqueue(ifp, m);
   7894 	txq = &sc->sc_queue[qid].wmq_txq;
   7895 
   7896 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7897 		m_freem(m);
   7898 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7899 		return ENOBUFS;
   7900 	}
   7901 
   7902 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7903 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7904 	if (m->m_flags & M_MCAST)
   7905 		if_statinc_ref(nsr, if_omcasts);
   7906 	IF_STAT_PUTREF(ifp);
   7907 
   7908 	if (mutex_tryenter(txq->txq_lock)) {
   7909 		if (!txq->txq_stopping)
   7910 			wm_transmit_locked(ifp, txq);
   7911 		mutex_exit(txq->txq_lock);
   7912 	}
   7913 
   7914 	return 0;
   7915 }
   7916 
   7917 static void
   7918 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7919 {
   7920 
   7921 	wm_send_common_locked(ifp, txq, true);
   7922 }
   7923 
   7924 static void
   7925 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7926     bool is_transmit)
   7927 {
   7928 	struct wm_softc *sc = ifp->if_softc;
   7929 	struct mbuf *m0;
   7930 	struct wm_txsoft *txs;
   7931 	bus_dmamap_t dmamap;
   7932 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7933 	bus_addr_t curaddr;
   7934 	bus_size_t seglen, curlen;
   7935 	uint32_t cksumcmd;
   7936 	uint8_t cksumfields;
   7937 	bool remap = true;
   7938 
   7939 	KASSERT(mutex_owned(txq->txq_lock));
   7940 
   7941 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7942 		return;
   7943 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7944 		return;
   7945 
   7946 	if (__predict_false(wm_linkdown_discard(txq))) {
   7947 		do {
   7948 			if (is_transmit)
   7949 				m0 = pcq_get(txq->txq_interq);
   7950 			else
   7951 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    7952 			/*
    7953 			 * Count the packet as sent even though it is
    7954 			 * discarded because the PHY link is down.
    7955 			 */
   7956 			if (m0 != NULL)
   7957 				if_statinc(ifp, if_opackets);
   7958 			m_freem(m0);
   7959 		} while (m0 != NULL);
   7960 		return;
   7961 	}
   7962 
   7963 	/* Remember the previous number of free descriptors. */
   7964 	ofree = txq->txq_free;
   7965 
   7966 	/*
   7967 	 * Loop through the send queue, setting up transmit descriptors
   7968 	 * until we drain the queue, or use up all available transmit
   7969 	 * descriptors.
   7970 	 */
   7971 	for (;;) {
   7972 		m0 = NULL;
   7973 
   7974 		/* Get a work queue entry. */
   7975 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7976 			wm_txeof(txq, UINT_MAX);
   7977 			if (txq->txq_sfree == 0) {
   7978 				DPRINTF(sc, WM_DEBUG_TX,
   7979 				    ("%s: TX: no free job descriptors\n",
   7980 					device_xname(sc->sc_dev)));
   7981 				WM_Q_EVCNT_INCR(txq, txsstall);
   7982 				break;
   7983 			}
   7984 		}
   7985 
   7986 		/* Grab a packet off the queue. */
   7987 		if (is_transmit)
   7988 			m0 = pcq_get(txq->txq_interq);
   7989 		else
   7990 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7991 		if (m0 == NULL)
   7992 			break;
   7993 
   7994 		DPRINTF(sc, WM_DEBUG_TX,
   7995 		    ("%s: TX: have packet to transmit: %p\n",
   7996 			device_xname(sc->sc_dev), m0));
   7997 
   7998 		txs = &txq->txq_soft[txq->txq_snext];
   7999 		dmamap = txs->txs_dmamap;
   8000 
   8001 		use_tso = (m0->m_pkthdr.csum_flags &
   8002 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8003 
   8004 		/*
   8005 		 * So says the Linux driver:
   8006 		 * The controller does a simple calculation to make sure
   8007 		 * there is enough room in the FIFO before initiating the
   8008 		 * DMA for each buffer. The calc is:
   8009 		 *	4 = ceil(buffer len / MSS)
   8010 		 * To make sure we don't overrun the FIFO, adjust the max
   8011 		 * buffer len if the MSS drops.
   8012 		 */
   8013 		dmamap->dm_maxsegsz =
   8014 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8015 		    ? m0->m_pkthdr.segsz << 2
   8016 		    : WTX_MAX_LEN;
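
         		/*
         		 * Worked example with a hypothetical MSS: for a TSO
         		 * packet with an MSS of 1448, each DMA segment is
         		 * limited to 1448 << 2 = 5792 bytes (assuming that is
         		 * below WTX_MAX_LEN); non-TSO packets keep the
         		 * WTX_MAX_LEN limit.
         		 */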
   8017 
   8018 		/*
   8019 		 * Load the DMA map.  If this fails, the packet either
   8020 		 * didn't fit in the allotted number of segments, or we
   8021 		 * were short on resources.  For the too-many-segments
   8022 		 * case, we simply report an error and drop the packet,
   8023 		 * since we can't sanely copy a jumbo packet to a single
   8024 		 * buffer.
   8025 		 */
   8026 retry:
   8027 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8028 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8029 		if (__predict_false(error)) {
   8030 			if (error == EFBIG) {
   8031 				if (remap == true) {
   8032 					struct mbuf *m;
   8033 
   8034 					remap = false;
   8035 					m = m_defrag(m0, M_NOWAIT);
   8036 					if (m != NULL) {
   8037 						WM_Q_EVCNT_INCR(txq, defrag);
   8038 						m0 = m;
   8039 						goto retry;
   8040 					}
   8041 				}
   8042 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8043 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8044 				    "DMA segments, dropping...\n",
   8045 				    device_xname(sc->sc_dev));
   8046 				wm_dump_mbuf_chain(sc, m0);
   8047 				m_freem(m0);
   8048 				continue;
   8049 			}
   8050 			/* Short on resources, just stop for now. */
   8051 			DPRINTF(sc, WM_DEBUG_TX,
   8052 			    ("%s: TX: dmamap load failed: %d\n",
   8053 				device_xname(sc->sc_dev), error));
   8054 			break;
   8055 		}
   8056 
   8057 		segs_needed = dmamap->dm_nsegs;
   8058 		if (use_tso) {
   8059 			/* For sentinel descriptor; see below. */
   8060 			segs_needed++;
   8061 		}
   8062 
   8063 		/*
   8064 		 * Ensure we have enough descriptors free to describe
   8065 		 * the packet. Note, we always reserve one descriptor
   8066 		 * at the end of the ring due to the semantics of the
   8067 		 * TDT register, plus one more in the event we need
   8068 		 * to load offload context.
   8069 		 */
   8070 		if (segs_needed > txq->txq_free - 2) {
   8071 			/*
   8072 			 * Not enough free descriptors to transmit this
   8073 			 * packet.  We haven't committed anything yet,
   8074 			 * so just unload the DMA map, put the packet
    8075 			 * back on the queue, and punt. Notify the upper
   8076 			 * layer that there are no more slots left.
   8077 			 */
   8078 			DPRINTF(sc, WM_DEBUG_TX,
   8079 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8080 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8081 				segs_needed, txq->txq_free - 1));
   8082 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8083 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8084 			WM_Q_EVCNT_INCR(txq, txdstall);
   8085 			break;
   8086 		}
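
         		/*
         		 * A hypothetical example of the check above: with
         		 * txq_free == 6, a TSO packet with five DMA segments
         		 * plus the sentinel (segs_needed == 6) trips the
         		 * 6 > 6 - 2 test, so the queue is marked full until
         		 * wm_txeof() reclaims descriptors.
         		 */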
   8087 
   8088 		/*
   8089 		 * Check for 82547 Tx FIFO bug. We need to do this
   8090 		 * once we know we can transmit the packet, since we
   8091 		 * do some internal FIFO space accounting here.
   8092 		 */
   8093 		if (sc->sc_type == WM_T_82547 &&
   8094 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8095 			DPRINTF(sc, WM_DEBUG_TX,
   8096 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8097 				device_xname(sc->sc_dev)));
   8098 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8099 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8100 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8101 			break;
   8102 		}
   8103 
   8104 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8105 
   8106 		DPRINTF(sc, WM_DEBUG_TX,
   8107 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8108 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8109 
   8110 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8111 
   8112 		/*
   8113 		 * Store a pointer to the packet so that we can free it
   8114 		 * later.
   8115 		 *
   8116 		 * Initially, we consider the number of descriptors the
    8117 		 * packet uses to be the number of DMA segments.  This may be
   8118 		 * incremented by 1 if we do checksum offload (a descriptor
   8119 		 * is used to set the checksum context).
   8120 		 */
   8121 		txs->txs_mbuf = m0;
   8122 		txs->txs_firstdesc = txq->txq_next;
   8123 		txs->txs_ndesc = segs_needed;
   8124 
   8125 		/* Set up offload parameters for this packet. */
   8126 		if (m0->m_pkthdr.csum_flags &
   8127 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8128 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8129 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8130 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8131 		} else {
   8132 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8133 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8134 			cksumcmd = 0;
   8135 			cksumfields = 0;
   8136 		}
   8137 
   8138 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8139 
   8140 		/* Sync the DMA map. */
   8141 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8142 		    BUS_DMASYNC_PREWRITE);
   8143 
   8144 		/* Initialize the transmit descriptor. */
   8145 		for (nexttx = txq->txq_next, seg = 0;
   8146 		     seg < dmamap->dm_nsegs; seg++) {
   8147 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8148 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8149 			     seglen != 0;
   8150 			     curaddr += curlen, seglen -= curlen,
   8151 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8152 				curlen = seglen;
   8153 
   8154 				/*
   8155 				 * So says the Linux driver:
   8156 				 * Work around for premature descriptor
   8157 				 * write-backs in TSO mode.  Append a
   8158 				 * 4-byte sentinel descriptor.
   8159 				 */
   8160 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8161 				    curlen > 8)
   8162 					curlen -= 4;
   8163 
   8164 				wm_set_dma_addr(
   8165 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8166 				txq->txq_descs[nexttx].wtx_cmdlen
   8167 				    = htole32(cksumcmd | curlen);
   8168 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8169 				    = 0;
   8170 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8171 				    = cksumfields;
    8172 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8173 				lasttx = nexttx;
   8174 
   8175 				DPRINTF(sc, WM_DEBUG_TX,
   8176 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8177 					"len %#04zx\n",
   8178 					device_xname(sc->sc_dev), nexttx,
   8179 					(uint64_t)curaddr, curlen));
   8180 			}
   8181 		}
   8182 
   8183 		KASSERT(lasttx != -1);
   8184 
   8185 		/*
   8186 		 * Set up the command byte on the last descriptor of
   8187 		 * the packet. If we're in the interrupt delay window,
   8188 		 * delay the interrupt.
   8189 		 */
   8190 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8191 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8192 
   8193 		/*
   8194 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8195 		 * up the descriptor to encapsulate the packet for us.
   8196 		 *
   8197 		 * This is only valid on the last descriptor of the packet.
   8198 		 */
   8199 		if (vlan_has_tag(m0)) {
   8200 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8201 			    htole32(WTX_CMD_VLE);
   8202 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8203 			    = htole16(vlan_get_tag(m0));
   8204 		}
   8205 
   8206 		txs->txs_lastdesc = lasttx;
   8207 
   8208 		DPRINTF(sc, WM_DEBUG_TX,
   8209 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8210 			device_xname(sc->sc_dev),
   8211 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8212 
   8213 		/* Sync the descriptors we're using. */
   8214 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8215 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8216 
   8217 		/* Give the packet to the chip. */
   8218 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8219 
   8220 		DPRINTF(sc, WM_DEBUG_TX,
   8221 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8222 
   8223 		DPRINTF(sc, WM_DEBUG_TX,
   8224 		    ("%s: TX: finished transmitting packet, job %d\n",
   8225 			device_xname(sc->sc_dev), txq->txq_snext));
   8226 
   8227 		/* Advance the tx pointer. */
   8228 		txq->txq_free -= txs->txs_ndesc;
   8229 		txq->txq_next = nexttx;
   8230 
   8231 		txq->txq_sfree--;
   8232 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8233 
   8234 		/* Pass the packet to any BPF listeners. */
   8235 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8236 	}
   8237 
   8238 	if (m0 != NULL) {
   8239 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8240 		WM_Q_EVCNT_INCR(txq, descdrop);
   8241 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8242 			__func__));
   8243 		m_freem(m0);
   8244 	}
   8245 
   8246 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8247 		/* No more slots; notify upper layer. */
   8248 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8249 	}
   8250 
   8251 	if (txq->txq_free != ofree) {
   8252 		/* Set a watchdog timer in case the chip flakes out. */
   8253 		txq->txq_lastsent = time_uptime;
   8254 		txq->txq_sending = true;
   8255 	}
   8256 }
   8257 
   8258 /*
   8259  * wm_nq_tx_offload:
   8260  *
   8261  *	Set up TCP/IP checksumming parameters for the
   8262  *	specified packet, for NEWQUEUE devices
   8263  */
   8264 static void
   8265 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8266     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8267 {
   8268 	struct mbuf *m0 = txs->txs_mbuf;
   8269 	uint32_t vl_len, mssidx, cmdc;
   8270 	struct ether_header *eh;
   8271 	int offset, iphl;
   8272 
   8273 	/*
   8274 	 * XXX It would be nice if the mbuf pkthdr had offset
   8275 	 * fields for the protocol headers.
   8276 	 */
   8277 	*cmdlenp = 0;
   8278 	*fieldsp = 0;
   8279 
   8280 	eh = mtod(m0, struct ether_header *);
   8281 	switch (htons(eh->ether_type)) {
   8282 	case ETHERTYPE_IP:
   8283 	case ETHERTYPE_IPV6:
   8284 		offset = ETHER_HDR_LEN;
   8285 		break;
   8286 
   8287 	case ETHERTYPE_VLAN:
   8288 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8289 		break;
   8290 
   8291 	default:
   8292 		/* Don't support this protocol or encapsulation. */
   8293 		*do_csum = false;
   8294 		return;
   8295 	}
   8296 	*do_csum = true;
   8297 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8298 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8299 
   8300 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8301 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8302 
   8303 	if ((m0->m_pkthdr.csum_flags &
   8304 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8305 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8306 	} else {
   8307 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8308 	}
   8309 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8310 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8311 
   8312 	if (vlan_has_tag(m0)) {
   8313 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8314 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8315 		*cmdlenp |= NQTX_CMD_VLE;
   8316 	}
   8317 
   8318 	mssidx = 0;
   8319 
   8320 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8321 		int hlen = offset + iphl;
   8322 		int tcp_hlen;
   8323 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8324 
   8325 		if (__predict_false(m0->m_len <
   8326 				    (hlen + sizeof(struct tcphdr)))) {
   8327 			/*
   8328 			 * TCP/IP headers are not in the first mbuf; we need
   8329 			 * to do this the slow and painful way. Let's just
   8330 			 * hope this doesn't happen very often.
   8331 			 */
   8332 			struct tcphdr th;
   8333 
   8334 			WM_Q_EVCNT_INCR(txq, tsopain);
   8335 
   8336 			m_copydata(m0, hlen, sizeof(th), &th);
   8337 			if (v4) {
   8338 				struct ip ip;
   8339 
   8340 				m_copydata(m0, offset, sizeof(ip), &ip);
   8341 				ip.ip_len = 0;
   8342 				m_copyback(m0,
   8343 				    offset + offsetof(struct ip, ip_len),
   8344 				    sizeof(ip.ip_len), &ip.ip_len);
   8345 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8346 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8347 			} else {
   8348 				struct ip6_hdr ip6;
   8349 
   8350 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8351 				ip6.ip6_plen = 0;
   8352 				m_copyback(m0,
   8353 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8354 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8355 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8356 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8357 			}
   8358 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8359 			    sizeof(th.th_sum), &th.th_sum);
   8360 
   8361 			tcp_hlen = th.th_off << 2;
   8362 		} else {
   8363 			/*
   8364 			 * TCP/IP headers are in the first mbuf; we can do
   8365 			 * this the easy way.
   8366 			 */
   8367 			struct tcphdr *th;
   8368 
   8369 			if (v4) {
   8370 				struct ip *ip =
   8371 				    (void *)(mtod(m0, char *) + offset);
   8372 				th = (void *)(mtod(m0, char *) + hlen);
   8373 
   8374 				ip->ip_len = 0;
   8375 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8376 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8377 			} else {
   8378 				struct ip6_hdr *ip6 =
   8379 				    (void *)(mtod(m0, char *) + offset);
   8380 				th = (void *)(mtod(m0, char *) + hlen);
   8381 
   8382 				ip6->ip6_plen = 0;
   8383 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8384 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8385 			}
   8386 			tcp_hlen = th->th_off << 2;
   8387 		}
   8388 		hlen += tcp_hlen;
   8389 		*cmdlenp |= NQTX_CMD_TSE;
   8390 
   8391 		if (v4) {
   8392 			WM_Q_EVCNT_INCR(txq, tso);
   8393 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8394 		} else {
   8395 			WM_Q_EVCNT_INCR(txq, tso6);
   8396 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8397 		}
   8398 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8399 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8400 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8401 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8402 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8403 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8404 	} else {
   8405 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8406 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8407 	}
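
         	/*
         	 * A hypothetical PAYLEN example for the non-TSO branch above:
         	 * a 1514-byte frame stores 1514 << NQTXD_FIELDS_PAYLEN_SHIFT
         	 * in *fieldsp; in the TSO branch only the payload bytes after
         	 * the headers are counted.
         	 */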
   8408 
   8409 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8410 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8411 		cmdc |= NQTXC_CMD_IP4;
   8412 	}
   8413 
   8414 	if (m0->m_pkthdr.csum_flags &
   8415 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8416 		WM_Q_EVCNT_INCR(txq, tusum);
   8417 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8418 			cmdc |= NQTXC_CMD_TCP;
   8419 		else
   8420 			cmdc |= NQTXC_CMD_UDP;
   8421 
   8422 		cmdc |= NQTXC_CMD_IP4;
   8423 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8424 	}
   8425 	if (m0->m_pkthdr.csum_flags &
   8426 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8427 		WM_Q_EVCNT_INCR(txq, tusum6);
   8428 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8429 			cmdc |= NQTXC_CMD_TCP;
   8430 		else
   8431 			cmdc |= NQTXC_CMD_UDP;
   8432 
   8433 		cmdc |= NQTXC_CMD_IP6;
   8434 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8435 	}
   8436 
    8437 	/*
    8438 	 * We don't have to write a context descriptor for every packet on
    8439 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8440 	 * I210 and I211. Writing one once per Tx queue is enough for these
    8441 	 * controllers.
    8442 	 * Writing a context descriptor for every packet adds overhead,
    8443 	 * but it does not cause problems.
    8444 	 */
   8445 	/* Fill in the context descriptor. */
   8446 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8447 	    htole32(vl_len);
   8448 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8449 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8450 	    htole32(cmdc);
   8451 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8452 	    htole32(mssidx);
   8453 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8454 	DPRINTF(sc, WM_DEBUG_TX,
   8455 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8456 		txq->txq_next, 0, vl_len));
   8457 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8458 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8459 	txs->txs_ndesc++;
   8460 }
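
         /*
          * A worked example of the advanced context descriptor built in
          * wm_nq_tx_offload() above, using hypothetical values: an untagged
          * IPv4/TCP frame with a 20-byte IP header gives offset = 14 and
          * iphl = 20, so vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
          * (20 << NQTXC_VLLEN_IPLEN_SHIFT) and no VLAN bits are set.
          */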
   8461 
   8462 /*
   8463  * wm_nq_start:		[ifnet interface function]
   8464  *
   8465  *	Start packet transmission on the interface for NEWQUEUE devices
   8466  */
   8467 static void
   8468 wm_nq_start(struct ifnet *ifp)
   8469 {
   8470 	struct wm_softc *sc = ifp->if_softc;
   8471 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8472 
   8473 #ifdef WM_MPSAFE
   8474 	KASSERT(if_is_mpsafe(ifp));
   8475 #endif
   8476 	/*
   8477 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8478 	 */
   8479 
   8480 	mutex_enter(txq->txq_lock);
   8481 	if (!txq->txq_stopping)
   8482 		wm_nq_start_locked(ifp);
   8483 	mutex_exit(txq->txq_lock);
   8484 }
   8485 
   8486 static void
   8487 wm_nq_start_locked(struct ifnet *ifp)
   8488 {
   8489 	struct wm_softc *sc = ifp->if_softc;
   8490 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8491 
   8492 	wm_nq_send_common_locked(ifp, txq, false);
   8493 }
   8494 
   8495 static int
   8496 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8497 {
   8498 	int qid;
   8499 	struct wm_softc *sc = ifp->if_softc;
   8500 	struct wm_txqueue *txq;
   8501 
   8502 	qid = wm_select_txqueue(ifp, m);
   8503 	txq = &sc->sc_queue[qid].wmq_txq;
   8504 
   8505 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8506 		m_freem(m);
   8507 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8508 		return ENOBUFS;
   8509 	}
   8510 
   8511 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8512 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8513 	if (m->m_flags & M_MCAST)
   8514 		if_statinc_ref(nsr, if_omcasts);
   8515 	IF_STAT_PUTREF(ifp);
   8516 
    8517 	/*
    8518 	 * This mutex_tryenter() can fail at run time in two situations:
    8519 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8520 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8521 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8522 	 * dequeued by wm_deferred_start_locked(), so it does not get
    8523 	 * stuck.
    8524 	 * In case (2), the last packet enqueued to txq->txq_interq is
    8525 	 * also dequeued by wm_deferred_start_locked(), so it does not
    8526 	 * get stuck, either.
    8527 	 */
   8528 	if (mutex_tryenter(txq->txq_lock)) {
   8529 		if (!txq->txq_stopping)
   8530 			wm_nq_transmit_locked(ifp, txq);
   8531 		mutex_exit(txq->txq_lock);
   8532 	}
   8533 
   8534 	return 0;
   8535 }
   8536 
   8537 static void
   8538 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8539 {
   8540 
   8541 	wm_nq_send_common_locked(ifp, txq, true);
   8542 }
   8543 
   8544 static void
   8545 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8546     bool is_transmit)
   8547 {
   8548 	struct wm_softc *sc = ifp->if_softc;
   8549 	struct mbuf *m0;
   8550 	struct wm_txsoft *txs;
   8551 	bus_dmamap_t dmamap;
   8552 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8553 	bool do_csum, sent;
   8554 	bool remap = true;
   8555 
   8556 	KASSERT(mutex_owned(txq->txq_lock));
   8557 
   8558 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8559 		return;
   8560 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8561 		return;
   8562 
   8563 	if (__predict_false(wm_linkdown_discard(txq))) {
   8564 		do {
   8565 			if (is_transmit)
   8566 				m0 = pcq_get(txq->txq_interq);
   8567 			else
   8568 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    8569 			/*
    8570 			 * Count the packet as sent even though it is
    8571 			 * discarded because the PHY link is down.
    8572 			 */
   8573 			if (m0 != NULL)
   8574 				if_statinc(ifp, if_opackets);
   8575 			m_freem(m0);
   8576 		} while (m0 != NULL);
   8577 		return;
   8578 	}
   8579 
   8580 	sent = false;
   8581 
   8582 	/*
   8583 	 * Loop through the send queue, setting up transmit descriptors
   8584 	 * until we drain the queue, or use up all available transmit
   8585 	 * descriptors.
   8586 	 */
   8587 	for (;;) {
   8588 		m0 = NULL;
   8589 
   8590 		/* Get a work queue entry. */
   8591 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8592 			wm_txeof(txq, UINT_MAX);
   8593 			if (txq->txq_sfree == 0) {
   8594 				DPRINTF(sc, WM_DEBUG_TX,
   8595 				    ("%s: TX: no free job descriptors\n",
   8596 					device_xname(sc->sc_dev)));
   8597 				WM_Q_EVCNT_INCR(txq, txsstall);
   8598 				break;
   8599 			}
   8600 		}
   8601 
   8602 		/* Grab a packet off the queue. */
   8603 		if (is_transmit)
   8604 			m0 = pcq_get(txq->txq_interq);
   8605 		else
   8606 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8607 		if (m0 == NULL)
   8608 			break;
   8609 
   8610 		DPRINTF(sc, WM_DEBUG_TX,
   8611 		    ("%s: TX: have packet to transmit: %p\n",
   8612 		    device_xname(sc->sc_dev), m0));
   8613 
   8614 		txs = &txq->txq_soft[txq->txq_snext];
   8615 		dmamap = txs->txs_dmamap;
   8616 
   8617 		/*
   8618 		 * Load the DMA map.  If this fails, the packet either
   8619 		 * didn't fit in the allotted number of segments, or we
   8620 		 * were short on resources.  For the too-many-segments
   8621 		 * case, we simply report an error and drop the packet,
   8622 		 * since we can't sanely copy a jumbo packet to a single
   8623 		 * buffer.
   8624 		 */
   8625 retry:
   8626 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8627 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8628 		if (__predict_false(error)) {
   8629 			if (error == EFBIG) {
   8630 				if (remap == true) {
   8631 					struct mbuf *m;
   8632 
   8633 					remap = false;
   8634 					m = m_defrag(m0, M_NOWAIT);
   8635 					if (m != NULL) {
   8636 						WM_Q_EVCNT_INCR(txq, defrag);
   8637 						m0 = m;
   8638 						goto retry;
   8639 					}
   8640 				}
   8641 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8642 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8643 				    "DMA segments, dropping...\n",
   8644 				    device_xname(sc->sc_dev));
   8645 				wm_dump_mbuf_chain(sc, m0);
   8646 				m_freem(m0);
   8647 				continue;
   8648 			}
   8649 			/* Short on resources, just stop for now. */
   8650 			DPRINTF(sc, WM_DEBUG_TX,
   8651 			    ("%s: TX: dmamap load failed: %d\n",
   8652 				device_xname(sc->sc_dev), error));
   8653 			break;
   8654 		}
   8655 
   8656 		segs_needed = dmamap->dm_nsegs;
   8657 
   8658 		/*
   8659 		 * Ensure we have enough descriptors free to describe
   8660 		 * the packet. Note, we always reserve one descriptor
   8661 		 * at the end of the ring due to the semantics of the
   8662 		 * TDT register, plus one more in the event we need
   8663 		 * to load offload context.
   8664 		 */
   8665 		if (segs_needed > txq->txq_free - 2) {
   8666 			/*
   8667 			 * Not enough free descriptors to transmit this
   8668 			 * packet.  We haven't committed anything yet,
   8669 			 * so just unload the DMA map, put the packet
   8670 			 * pack on the queue, and punt. Notify the upper
   8671 			 * layer that there are no more slots left.
   8672 			 */
   8673 			DPRINTF(sc, WM_DEBUG_TX,
   8674 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8675 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8676 				segs_needed, txq->txq_free - 1));
   8677 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8678 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8679 			WM_Q_EVCNT_INCR(txq, txdstall);
   8680 			break;
   8681 		}
   8682 
   8683 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8684 
   8685 		DPRINTF(sc, WM_DEBUG_TX,
   8686 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8687 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8688 
   8689 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8690 
   8691 		/*
   8692 		 * Store a pointer to the packet so that we can free it
   8693 		 * later.
   8694 		 *
   8695 		 * Initially, we consider the number of descriptors the
    8696 		 * packet uses to be the number of DMA segments.  This may be
   8697 		 * incremented by 1 if we do checksum offload (a descriptor
   8698 		 * is used to set the checksum context).
   8699 		 */
   8700 		txs->txs_mbuf = m0;
   8701 		txs->txs_firstdesc = txq->txq_next;
   8702 		txs->txs_ndesc = segs_needed;
   8703 
   8704 		/* Set up offload parameters for this packet. */
   8705 		uint32_t cmdlen, fields, dcmdlen;
   8706 		if (m0->m_pkthdr.csum_flags &
   8707 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8708 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8709 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8710 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8711 			    &do_csum);
   8712 		} else {
   8713 			do_csum = false;
   8714 			cmdlen = 0;
   8715 			fields = 0;
   8716 		}
   8717 
   8718 		/* Sync the DMA map. */
   8719 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8720 		    BUS_DMASYNC_PREWRITE);
   8721 
   8722 		/* Initialize the first transmit descriptor. */
   8723 		nexttx = txq->txq_next;
   8724 		if (!do_csum) {
   8725 			/* Setup a legacy descriptor */
   8726 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8727 			    dmamap->dm_segs[0].ds_addr);
   8728 			txq->txq_descs[nexttx].wtx_cmdlen =
   8729 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8730 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8731 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8732 			if (vlan_has_tag(m0)) {
   8733 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8734 				    htole32(WTX_CMD_VLE);
   8735 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8736 				    htole16(vlan_get_tag(m0));
   8737 			} else
    8738 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8739 
   8740 			dcmdlen = 0;
   8741 		} else {
   8742 			/* Setup an advanced data descriptor */
   8743 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8744 			    htole64(dmamap->dm_segs[0].ds_addr);
   8745 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8746 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8747 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8748 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8749 			    htole32(fields);
   8750 			DPRINTF(sc, WM_DEBUG_TX,
   8751 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8752 				device_xname(sc->sc_dev), nexttx,
   8753 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8754 			DPRINTF(sc, WM_DEBUG_TX,
   8755 			    ("\t 0x%08x%08x\n", fields,
   8756 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8757 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8758 		}
   8759 
   8760 		lasttx = nexttx;
   8761 		nexttx = WM_NEXTTX(txq, nexttx);
    8762 		/*
    8763 		 * Fill in the next descriptors. The legacy and advanced
    8764 		 * formats are the same here.
    8765 		 */
   8766 		for (seg = 1; seg < dmamap->dm_nsegs;
   8767 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8768 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8769 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8770 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8771 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8772 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8773 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8774 			lasttx = nexttx;
   8775 
   8776 			DPRINTF(sc, WM_DEBUG_TX,
   8777 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8778 				device_xname(sc->sc_dev), nexttx,
   8779 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8780 				dmamap->dm_segs[seg].ds_len));
   8781 		}
   8782 
   8783 		KASSERT(lasttx != -1);
   8784 
   8785 		/*
   8786 		 * Set up the command byte on the last descriptor of
   8787 		 * the packet. If we're in the interrupt delay window,
   8788 		 * delay the interrupt.
   8789 		 */
   8790 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8791 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8792 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8793 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8794 
   8795 		txs->txs_lastdesc = lasttx;
   8796 
   8797 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8798 		    device_xname(sc->sc_dev),
   8799 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8800 
   8801 		/* Sync the descriptors we're using. */
   8802 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8803 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8804 
   8805 		/* Give the packet to the chip. */
   8806 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8807 		sent = true;
   8808 
   8809 		DPRINTF(sc, WM_DEBUG_TX,
   8810 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8811 
   8812 		DPRINTF(sc, WM_DEBUG_TX,
   8813 		    ("%s: TX: finished transmitting packet, job %d\n",
   8814 			device_xname(sc->sc_dev), txq->txq_snext));
   8815 
   8816 		/* Advance the tx pointer. */
   8817 		txq->txq_free -= txs->txs_ndesc;
   8818 		txq->txq_next = nexttx;
   8819 
   8820 		txq->txq_sfree--;
   8821 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8822 
   8823 		/* Pass the packet to any BPF listeners. */
   8824 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8825 	}
   8826 
   8827 	if (m0 != NULL) {
   8828 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8829 		WM_Q_EVCNT_INCR(txq, descdrop);
   8830 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8831 			__func__));
   8832 		m_freem(m0);
   8833 	}
   8834 
   8835 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8836 		/* No more slots; notify upper layer. */
   8837 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8838 	}
   8839 
   8840 	if (sent) {
   8841 		/* Set a watchdog timer in case the chip flakes out. */
   8842 		txq->txq_lastsent = time_uptime;
   8843 		txq->txq_sending = true;
   8844 	}
   8845 }
   8846 
   8847 static void
   8848 wm_deferred_start_locked(struct wm_txqueue *txq)
   8849 {
   8850 	struct wm_softc *sc = txq->txq_sc;
   8851 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8852 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8853 	int qid = wmq->wmq_id;
   8854 
   8855 	KASSERT(mutex_owned(txq->txq_lock));
   8856 
   8857 	if (txq->txq_stopping) {
   8858 		mutex_exit(txq->txq_lock);
   8859 		return;
   8860 	}
   8861 
   8862 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8863 		/* XXX Needed for ALTQ or single-CPU systems */
   8864 		if (qid == 0)
   8865 			wm_nq_start_locked(ifp);
   8866 		wm_nq_transmit_locked(ifp, txq);
   8867 	} else {
    8868 		/* XXX Needed for ALTQ or single-CPU systems */
   8869 		if (qid == 0)
   8870 			wm_start_locked(ifp);
   8871 		wm_transmit_locked(ifp, txq);
   8872 	}
   8873 }
   8874 
   8875 /* Interrupt */
   8876 
   8877 /*
   8878  * wm_txeof:
   8879  *
   8880  *	Helper; handle transmit interrupts.
   8881  */
   8882 static bool
   8883 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8884 {
   8885 	struct wm_softc *sc = txq->txq_sc;
   8886 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8887 	struct wm_txsoft *txs;
   8888 	int count = 0;
   8889 	int i;
   8890 	uint8_t status;
   8891 	bool more = false;
   8892 
   8893 	KASSERT(mutex_owned(txq->txq_lock));
   8894 
   8895 	if (txq->txq_stopping)
   8896 		return false;
   8897 
   8898 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8899 
   8900 	/*
   8901 	 * Go through the Tx list and free mbufs for those
   8902 	 * frames which have been transmitted.
   8903 	 */
   8904 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8905 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8906 		if (limit-- == 0) {
   8907 			more = true;
   8908 			DPRINTF(sc, WM_DEBUG_TX,
   8909 			    ("%s: TX: loop limited, job %d is not processed\n",
   8910 				device_xname(sc->sc_dev), i));
   8911 			break;
   8912 		}
   8913 
   8914 		txs = &txq->txq_soft[i];
   8915 
   8916 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8917 			device_xname(sc->sc_dev), i));
   8918 
   8919 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8920 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8921 
   8922 		status =
   8923 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8924 		if ((status & WTX_ST_DD) == 0) {
   8925 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8926 			    BUS_DMASYNC_PREREAD);
   8927 			break;
   8928 		}
   8929 
   8930 		count++;
   8931 		DPRINTF(sc, WM_DEBUG_TX,
   8932 		    ("%s: TX: job %d done: descs %d..%d\n",
   8933 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8934 		    txs->txs_lastdesc));
   8935 
   8936 		/*
   8937 		 * XXX We should probably be using the statistics
   8938 		 * XXX registers, but I don't know if they exist
   8939 		 * XXX on chips before the i82544.
   8940 		 */
   8941 
   8942 #ifdef WM_EVENT_COUNTERS
   8943 		if (status & WTX_ST_TU)
   8944 			WM_Q_EVCNT_INCR(txq, underrun);
   8945 #endif /* WM_EVENT_COUNTERS */
   8946 
    8947 		/*
    8948 		 * The documents for the 82574 and newer say the status field
    8949 		 * has neither an EC (Excessive Collision) bit nor an LC (Late
    8950 		 * Collision) bit; both are reserved. See the "PCIe GbE
    8951 		 * Controller Open Source Software Developer's Manual", the
    8952 		 * 82574 datasheet and newer.
    8953 		 *
    8954 		 * XXX I saw the LC bit set on an I218 even though the media
    8955 		 * was full duplex, so it may have another, undocumented meaning.
    8956 		 */
   8957 
   8958 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8959 		    && ((sc->sc_type < WM_T_82574)
   8960 			|| (sc->sc_type == WM_T_80003))) {
   8961 			if_statinc(ifp, if_oerrors);
   8962 			if (status & WTX_ST_LC)
   8963 				log(LOG_WARNING, "%s: late collision\n",
   8964 				    device_xname(sc->sc_dev));
   8965 			else if (status & WTX_ST_EC) {
   8966 				if_statadd(ifp, if_collisions,
   8967 				    TX_COLLISION_THRESHOLD + 1);
   8968 				log(LOG_WARNING, "%s: excessive collisions\n",
   8969 				    device_xname(sc->sc_dev));
   8970 			}
   8971 		} else
   8972 			if_statinc(ifp, if_opackets);
   8973 
   8974 		txq->txq_packets++;
   8975 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8976 
   8977 		txq->txq_free += txs->txs_ndesc;
   8978 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8979 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8980 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8981 		m_freem(txs->txs_mbuf);
   8982 		txs->txs_mbuf = NULL;
   8983 	}
   8984 
   8985 	/* Update the dirty transmit buffer pointer. */
   8986 	txq->txq_sdirty = i;
   8987 	DPRINTF(sc, WM_DEBUG_TX,
   8988 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8989 
   8990 	if (count != 0)
   8991 		rnd_add_uint32(&sc->rnd_source, count);
   8992 
   8993 	/*
   8994 	 * If there are no more pending transmissions, cancel the watchdog
   8995 	 * timer.
   8996 	 */
   8997 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8998 		txq->txq_sending = false;
   8999 
   9000 	return more;
   9001 }
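
         /*
          * A hypothetical usage sketch for wm_txeof()'s limit argument and
          * return value; wm_example_txeof_bounded() and the limit of 100
          * are illustrative assumptions, not part of the driver.  The real
          * interrupt paths pick their own limits and reschedule themselves.
          */
         #ifdef notyet
         static bool
         wm_example_txeof_bounded(struct wm_txqueue *txq)
         {
         	bool more;
         
         	mutex_enter(txq->txq_lock);
         	/* Reap at most 100 completed Tx jobs; true if work remains. */
         	more = wm_txeof(txq, 100);
         	mutex_exit(txq->txq_lock);
         
         	return more;
         }
         #endif /* notyet */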
   9002 
   9003 static inline uint32_t
   9004 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9005 {
   9006 	struct wm_softc *sc = rxq->rxq_sc;
   9007 
   9008 	if (sc->sc_type == WM_T_82574)
   9009 		return EXTRXC_STATUS(
   9010 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9011 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9012 		return NQRXC_STATUS(
   9013 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9014 	else
   9015 		return rxq->rxq_descs[idx].wrx_status;
   9016 }
   9017 
   9018 static inline uint32_t
   9019 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9020 {
   9021 	struct wm_softc *sc = rxq->rxq_sc;
   9022 
   9023 	if (sc->sc_type == WM_T_82574)
   9024 		return EXTRXC_ERROR(
   9025 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9026 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9027 		return NQRXC_ERROR(
   9028 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9029 	else
   9030 		return rxq->rxq_descs[idx].wrx_errors;
   9031 }
   9032 
   9033 static inline uint16_t
   9034 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9035 {
   9036 	struct wm_softc *sc = rxq->rxq_sc;
   9037 
   9038 	if (sc->sc_type == WM_T_82574)
   9039 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9040 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9041 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9042 	else
   9043 		return rxq->rxq_descs[idx].wrx_special;
   9044 }
   9045 
   9046 static inline int
   9047 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9048 {
   9049 	struct wm_softc *sc = rxq->rxq_sc;
   9050 
   9051 	if (sc->sc_type == WM_T_82574)
   9052 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9053 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9054 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9055 	else
   9056 		return rxq->rxq_descs[idx].wrx_len;
   9057 }
   9058 
   9059 #ifdef WM_DEBUG
   9060 static inline uint32_t
   9061 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9062 {
   9063 	struct wm_softc *sc = rxq->rxq_sc;
   9064 
   9065 	if (sc->sc_type == WM_T_82574)
   9066 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9067 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9068 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9069 	else
   9070 		return 0;
   9071 }
   9072 
   9073 static inline uint8_t
   9074 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9075 {
   9076 	struct wm_softc *sc = rxq->rxq_sc;
   9077 
   9078 	if (sc->sc_type == WM_T_82574)
   9079 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9080 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9081 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9082 	else
   9083 		return 0;
   9084 }
   9085 #endif /* WM_DEBUG */
   9086 
   9087 static inline bool
   9088 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9089     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9090 {
   9091 
   9092 	if (sc->sc_type == WM_T_82574)
   9093 		return (status & ext_bit) != 0;
   9094 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9095 		return (status & nq_bit) != 0;
   9096 	else
   9097 		return (status & legacy_bit) != 0;
   9098 }
   9099 
   9100 static inline bool
   9101 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9102     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9103 {
   9104 
   9105 	if (sc->sc_type == WM_T_82574)
   9106 		return (error & ext_bit) != 0;
   9107 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9108 		return (error & nq_bit) != 0;
   9109 	else
   9110 		return (error & legacy_bit) != 0;
   9111 }
   9112 
   9113 static inline bool
   9114 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9115 {
   9116 
   9117 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9118 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9119 		return true;
   9120 	else
   9121 		return false;
   9122 }
   9123 
   9124 static inline bool
   9125 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9126 {
   9127 	struct wm_softc *sc = rxq->rxq_sc;
   9128 
   9129 	/* XXX missing error bit for newqueue? */
   9130 	if (wm_rxdesc_is_set_error(sc, errors,
   9131 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9132 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9133 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9134 		NQRXC_ERROR_RXE)) {
   9135 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9136 		    EXTRXC_ERROR_SE, 0))
   9137 			log(LOG_WARNING, "%s: symbol error\n",
   9138 			    device_xname(sc->sc_dev));
   9139 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9140 		    EXTRXC_ERROR_SEQ, 0))
   9141 			log(LOG_WARNING, "%s: receive sequence error\n",
   9142 			    device_xname(sc->sc_dev));
   9143 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9144 		    EXTRXC_ERROR_CE, 0))
   9145 			log(LOG_WARNING, "%s: CRC error\n",
   9146 			    device_xname(sc->sc_dev));
   9147 		return true;
   9148 	}
   9149 
   9150 	return false;
   9151 }
   9152 
   9153 static inline bool
   9154 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9155 {
   9156 	struct wm_softc *sc = rxq->rxq_sc;
   9157 
   9158 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9159 		NQRXC_STATUS_DD)) {
   9160 		/* We have processed all of the receive descriptors. */
   9161 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9162 		return false;
   9163 	}
   9164 
   9165 	return true;
   9166 }
   9167 
   9168 static inline bool
   9169 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9170     uint16_t vlantag, struct mbuf *m)
   9171 {
   9172 
   9173 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9174 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9175 		vlan_set_tag(m, le16toh(vlantag));
   9176 	}
   9177 
   9178 	return true;
   9179 }
   9180 
   9181 static inline void
   9182 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9183     uint32_t errors, struct mbuf *m)
   9184 {
   9185 	struct wm_softc *sc = rxq->rxq_sc;
   9186 
   9187 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9188 		if (wm_rxdesc_is_set_status(sc, status,
   9189 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9190 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9191 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9192 			if (wm_rxdesc_is_set_error(sc, errors,
   9193 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9194 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9195 		}
   9196 		if (wm_rxdesc_is_set_status(sc, status,
   9197 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9198 			/*
   9199 			 * Note: we don't know if this was TCP or UDP,
   9200 			 * so we just set both bits, and expect the
   9201 			 * upper layers to deal.
   9202 			 */
   9203 			WM_Q_EVCNT_INCR(rxq, tusum);
   9204 			m->m_pkthdr.csum_flags |=
   9205 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9206 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9207 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9208 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9209 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9210 		}
   9211 	}
   9212 }
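
         /*
          * A worked example of wm_rxdesc_ensure_checksum() for a
          * hypothetical packet: a good TCP/IPv4 frame with IPCS and TCPCS
          * set and no IPE or TCPE error bits ends up with csum_flags =
          * M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 |
          * M_CSUM_UDPv6; as noted above, the upper layers sort out which
          * protocol actually applies.
          */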
   9213 
   9214 /*
   9215  * wm_rxeof:
   9216  *
   9217  *	Helper; handle receive interrupts.
   9218  */
   9219 static bool
   9220 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9221 {
   9222 	struct wm_softc *sc = rxq->rxq_sc;
   9223 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9224 	struct wm_rxsoft *rxs;
   9225 	struct mbuf *m;
   9226 	int i, len;
   9227 	int count = 0;
   9228 	uint32_t status, errors;
   9229 	uint16_t vlantag;
   9230 	bool more = false;
   9231 
   9232 	KASSERT(mutex_owned(rxq->rxq_lock));
   9233 
   9234 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9235 		if (limit-- == 0) {
   9236 			more = true;
   9237 			DPRINTF(sc, WM_DEBUG_RX,
   9238 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9239 				device_xname(sc->sc_dev), i));
   9240 			break;
   9241 		}
   9242 
   9243 		rxs = &rxq->rxq_soft[i];
   9244 
   9245 		DPRINTF(sc, WM_DEBUG_RX,
   9246 		    ("%s: RX: checking descriptor %d\n",
   9247 			device_xname(sc->sc_dev), i));
   9248 		wm_cdrxsync(rxq, i,
   9249 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9250 
   9251 		status = wm_rxdesc_get_status(rxq, i);
   9252 		errors = wm_rxdesc_get_errors(rxq, i);
   9253 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9254 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9255 #ifdef WM_DEBUG
   9256 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9257 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9258 #endif
   9259 
   9260 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9261 			break;
   9262 		}
   9263 
   9264 		count++;
   9265 		if (__predict_false(rxq->rxq_discard)) {
   9266 			DPRINTF(sc, WM_DEBUG_RX,
   9267 			    ("%s: RX: discarding contents of descriptor %d\n",
   9268 				device_xname(sc->sc_dev), i));
   9269 			wm_init_rxdesc(rxq, i);
   9270 			if (wm_rxdesc_is_eop(rxq, status)) {
   9271 				/* Reset our state. */
   9272 				DPRINTF(sc, WM_DEBUG_RX,
   9273 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9274 					device_xname(sc->sc_dev)));
   9275 				rxq->rxq_discard = 0;
   9276 			}
   9277 			continue;
   9278 		}
   9279 
   9280 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9281 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9282 
   9283 		m = rxs->rxs_mbuf;
   9284 
   9285 		/*
   9286 		 * Add a new receive buffer to the ring, unless of
   9287 		 * course the length is zero. Treat the latter as a
   9288 		 * failed mapping.
   9289 		 */
   9290 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9291 			/*
   9292 			 * Failed, throw away what we've done so
   9293 			 * far, and discard the rest of the packet.
   9294 			 */
   9295 			if_statinc(ifp, if_ierrors);
   9296 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9297 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9298 			wm_init_rxdesc(rxq, i);
   9299 			if (!wm_rxdesc_is_eop(rxq, status))
   9300 				rxq->rxq_discard = 1;
   9301 			if (rxq->rxq_head != NULL)
   9302 				m_freem(rxq->rxq_head);
   9303 			WM_RXCHAIN_RESET(rxq);
   9304 			DPRINTF(sc, WM_DEBUG_RX,
   9305 			    ("%s: RX: Rx buffer allocation failed, "
   9306 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9307 				rxq->rxq_discard ? " (discard)" : ""));
   9308 			continue;
   9309 		}
   9310 
   9311 		m->m_len = len;
   9312 		rxq->rxq_len += len;
   9313 		DPRINTF(sc, WM_DEBUG_RX,
   9314 		    ("%s: RX: buffer at %p len %d\n",
   9315 			device_xname(sc->sc_dev), m->m_data, len));
   9316 
   9317 		/* If this is not the end of the packet, keep looking. */
   9318 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9319 			WM_RXCHAIN_LINK(rxq, m);
   9320 			DPRINTF(sc, WM_DEBUG_RX,
   9321 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9322 				device_xname(sc->sc_dev), rxq->rxq_len));
   9323 			continue;
   9324 		}
   9325 
    9326 		/*
    9327 		 * Okay, we have the entire packet now. The chip is
    9328 		 * configured to include the FCS except on the I35[04] and
    9329 		 * I21[01] (not all chips can be configured to strip it),
    9330 		 * so we need to trim it. Those chips have an erratum: the
    9331 		 * RCTL_SECRC bit in the RCTL register is always set, so we
    9332 		 * don't trim it there. PCH2 and newer chips also omit the
    9333 		 * FCS when jumbo frames are used, to work around an erratum.
    9334 		 * We may need to adjust the length of the previous mbuf in
    9335 		 * the chain if the current mbuf is too short.
    9336 		 */
   9337 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9338 			if (m->m_len < ETHER_CRC_LEN) {
   9339 				rxq->rxq_tail->m_len
   9340 				    -= (ETHER_CRC_LEN - m->m_len);
   9341 				m->m_len = 0;
   9342 			} else
   9343 				m->m_len -= ETHER_CRC_LEN;
   9344 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9345 		} else
   9346 			len = rxq->rxq_len;
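
         		/*
         		 * A worked example of the trim above with hypothetical
         		 * lengths: if the final mbuf holds only 2 bytes, the
         		 * previous mbuf in the chain is shortened by
         		 * ETHER_CRC_LEN - 2 = 2 bytes and the final mbuf's
         		 * m_len becomes 0, removing all 4 FCS bytes.
         		 */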
   9347 
   9348 		WM_RXCHAIN_LINK(rxq, m);
   9349 
   9350 		*rxq->rxq_tailp = NULL;
   9351 		m = rxq->rxq_head;
   9352 
   9353 		WM_RXCHAIN_RESET(rxq);
   9354 
   9355 		DPRINTF(sc, WM_DEBUG_RX,
   9356 		    ("%s: RX: have entire packet, len -> %d\n",
   9357 			device_xname(sc->sc_dev), len));
   9358 
   9359 		/* If an error occurred, update stats and drop the packet. */
   9360 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9361 			m_freem(m);
   9362 			continue;
   9363 		}
   9364 
   9365 		/* No errors.  Receive the packet. */
   9366 		m_set_rcvif(m, ifp);
   9367 		m->m_pkthdr.len = len;
    9368 		/*
    9369 		 * TODO
    9370 		 * The rsshash and rsstype should be saved in this mbuf.
    9371 		 */
   9372 		DPRINTF(sc, WM_DEBUG_RX,
   9373 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9374 			device_xname(sc->sc_dev), rsstype, rsshash));
   9375 
   9376 		/*
   9377 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9378 		 * for us.  Associate the tag with the packet.
   9379 		 */
   9380 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9381 			continue;
   9382 
   9383 		/* Set up checksum info for this packet. */
   9384 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9385 
   9386 		rxq->rxq_packets++;
   9387 		rxq->rxq_bytes += len;
   9388 		/* Pass it on. */
   9389 		if_percpuq_enqueue(sc->sc_ipq, m);
   9390 
   9391 		if (rxq->rxq_stopping)
   9392 			break;
   9393 	}
   9394 	rxq->rxq_ptr = i;
   9395 
   9396 	if (count != 0)
   9397 		rnd_add_uint32(&sc->rnd_source, count);
   9398 
   9399 	DPRINTF(sc, WM_DEBUG_RX,
   9400 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9401 
   9402 	return more;
   9403 }
   9404 
   9405 /*
   9406  * wm_linkintr_gmii:
   9407  *
   9408  *	Helper; handle link interrupts for GMII.
   9409  */
   9410 static void
   9411 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9412 {
   9413 	device_t dev = sc->sc_dev;
   9414 	uint32_t status, reg;
   9415 	bool link;
   9416 	int rv;
   9417 
   9418 	KASSERT(WM_CORE_LOCKED(sc));
   9419 
   9420 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9421 		__func__));
   9422 
   9423 	if ((icr & ICR_LSC) == 0) {
   9424 		if (icr & ICR_RXSEQ)
   9425 			DPRINTF(sc, WM_DEBUG_LINK,
   9426 			    ("%s: LINK Receive sequence error\n",
   9427 				device_xname(dev)));
   9428 		return;
   9429 	}
   9430 
   9431 	/* Link status changed */
   9432 	status = CSR_READ(sc, WMREG_STATUS);
   9433 	link = status & STATUS_LU;
   9434 	if (link) {
   9435 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9436 			device_xname(dev),
   9437 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9438 		if (wm_phy_need_linkdown_discard(sc))
   9439 			wm_clear_linkdown_discard(sc);
   9440 	} else {
   9441 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9442 			device_xname(dev)));
   9443 		if (wm_phy_need_linkdown_discard(sc))
   9444 			wm_set_linkdown_discard(sc);
   9445 	}
   9446 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9447 		wm_gig_downshift_workaround_ich8lan(sc);
   9448 
   9449 	if ((sc->sc_type == WM_T_ICH8)
   9450 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9451 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9452 	}
   9453 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9454 		device_xname(dev)));
   9455 	mii_pollstat(&sc->sc_mii);
   9456 	if (sc->sc_type == WM_T_82543) {
   9457 		int miistatus, active;
   9458 
    9459 		/*
    9460 		 * With the 82543, we must force the MAC's speed and
    9461 		 * duplex settings to match the PHY's current
    9462 		 * configuration.
    9463 		 */
   9464 		miistatus = sc->sc_mii.mii_media_status;
   9465 
   9466 		if (miistatus & IFM_ACTIVE) {
   9467 			active = sc->sc_mii.mii_media_active;
   9468 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9469 			switch (IFM_SUBTYPE(active)) {
   9470 			case IFM_10_T:
   9471 				sc->sc_ctrl |= CTRL_SPEED_10;
   9472 				break;
   9473 			case IFM_100_TX:
   9474 				sc->sc_ctrl |= CTRL_SPEED_100;
   9475 				break;
   9476 			case IFM_1000_T:
   9477 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9478 				break;
   9479 			default:
   9480 				/*
   9481 				 * Fiber?
    9482 				 * Should never get here.
   9483 				 */
   9484 				device_printf(dev, "unknown media (%x)\n",
   9485 				    active);
   9486 				break;
   9487 			}
   9488 			if (active & IFM_FDX)
   9489 				sc->sc_ctrl |= CTRL_FD;
   9490 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9491 		}
   9492 	} else if (sc->sc_type == WM_T_PCH) {
   9493 		wm_k1_gig_workaround_hv(sc,
   9494 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9495 	}
   9496 
   9497 	/*
   9498 	 * When connected at 10Mbps half-duplex, some parts are excessively
    9499 	 * aggressive, resulting in many collisions. To avoid this, increase
   9500 	 * the IPG and reduce Rx latency in the PHY.
   9501 	 */
   9502 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9503 	    && link) {
   9504 		uint32_t tipg_reg;
   9505 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9506 		bool fdx;
   9507 		uint16_t emi_addr, emi_val;
   9508 
   9509 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9510 		tipg_reg &= ~TIPG_IPGT_MASK;
   9511 		fdx = status & STATUS_FD;
   9512 
   9513 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9514 			tipg_reg |= 0xff;
   9515 			/* Reduce Rx latency in analog PHY */
   9516 			emi_val = 0;
   9517 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9518 		    fdx && speed != STATUS_SPEED_1000) {
   9519 			tipg_reg |= 0xc;
   9520 			emi_val = 1;
   9521 		} else {
    9522 			/* Roll back to the default values */
   9523 			tipg_reg |= 0x08;
   9524 			emi_val = 1;
   9525 		}
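		/*
		 * These constants appear to follow Intel's e1000e driver
		 * (an assumption based on the matching logic, not on a
		 * datasheet): 0xff stretches the transmit IPG as far as
		 * possible for the 10/half case, 0xc is an intermediate
		 * value used on SPT and newer at below-gigabit full
		 * duplex, and 0x08 is the hardware default IPGT.
		 */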
   9526 
   9527 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9528 
   9529 		rv = sc->phy.acquire(sc);
   9530 		if (rv)
   9531 			return;
   9532 
   9533 		if (sc->sc_type == WM_T_PCH2)
   9534 			emi_addr = I82579_RX_CONFIG;
   9535 		else
   9536 			emi_addr = I217_RX_CONFIG;
   9537 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9538 
   9539 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9540 			uint16_t phy_reg;
   9541 
   9542 			sc->phy.readreg_locked(dev, 2,
   9543 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9544 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9545 			if (speed == STATUS_SPEED_100
   9546 			    || speed == STATUS_SPEED_10)
   9547 				phy_reg |= 0x3e8;
   9548 			else
   9549 				phy_reg |= 0xfa;
   9550 			sc->phy.writereg_locked(dev, 2,
   9551 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9552 
   9553 			if (speed == STATUS_SPEED_1000) {
   9554 				sc->phy.readreg_locked(dev, 2,
   9555 				    HV_PM_CTRL, &phy_reg);
   9556 
   9557 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9558 
   9559 				sc->phy.writereg_locked(dev, 2,
   9560 				    HV_PM_CTRL, phy_reg);
   9561 			}
   9562 		}
   9563 		sc->phy.release(sc);
   9564 
   9565 		if (rv)
   9566 			return;
   9567 
   9568 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9569 			uint16_t data, ptr_gap;
   9570 
   9571 			if (speed == STATUS_SPEED_1000) {
   9572 				rv = sc->phy.acquire(sc);
   9573 				if (rv)
   9574 					return;
   9575 
   9576 				rv = sc->phy.readreg_locked(dev, 2,
   9577 				    I82579_UNKNOWN1, &data);
   9578 				if (rv) {
   9579 					sc->phy.release(sc);
   9580 					return;
   9581 				}
   9582 
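				/*
				 * The pointer gap lives in bits 11:2 of
				 * I82579_UNKNOWN1; enforce a minimum
				 * value of 0x18 at gigabit speed.
				 */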
   9583 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9584 				if (ptr_gap < 0x18) {
   9585 					data &= ~(0x3ff << 2);
   9586 					data |= (0x18 << 2);
   9587 					rv = sc->phy.writereg_locked(dev,
   9588 					    2, I82579_UNKNOWN1, data);
   9589 				}
   9590 				sc->phy.release(sc);
   9591 				if (rv)
   9592 					return;
   9593 			} else {
   9594 				rv = sc->phy.acquire(sc);
   9595 				if (rv)
   9596 					return;
   9597 
   9598 				rv = sc->phy.writereg_locked(dev, 2,
   9599 				    I82579_UNKNOWN1, 0xc023);
   9600 				sc->phy.release(sc);
   9601 				if (rv)
   9602 					return;
   9603 
   9604 			}
   9605 		}
   9606 	}
   9607 
   9608 	/*
    9609 	 * I217 packet loss issue:
    9610 	 * Ensure that the FEXTNVM4 beacon duration is set correctly
    9611 	 * on power-up.
    9612 	 * Set the beacon duration for I217 to 8 usec.
   9613 	 */
   9614 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9615 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9616 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9617 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9618 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9619 	}
   9620 
   9621 	/* Work-around I218 hang issue */
   9622 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9623 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9624 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9625 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9626 		wm_k1_workaround_lpt_lp(sc, link);
   9627 
   9628 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9629 		/*
   9630 		 * Set platform power management values for Latency
   9631 		 * Tolerance Reporting (LTR)
   9632 		 */
   9633 		wm_platform_pm_pch_lpt(sc,
   9634 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9635 	}
   9636 
   9637 	/* Clear link partner's EEE ability */
   9638 	sc->eee_lp_ability = 0;
   9639 
   9640 	/* FEXTNVM6 K1-off workaround */
   9641 	if (sc->sc_type == WM_T_PCH_SPT) {
   9642 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9643 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9644 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9645 		else
   9646 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9647 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9648 	}
   9649 
   9650 	if (!link)
   9651 		return;
   9652 
   9653 	switch (sc->sc_type) {
   9654 	case WM_T_PCH2:
   9655 		wm_k1_workaround_lv(sc);
   9656 		/* FALLTHROUGH */
   9657 	case WM_T_PCH:
   9658 		if (sc->sc_phytype == WMPHY_82578)
   9659 			wm_link_stall_workaround_hv(sc);
   9660 		break;
   9661 	default:
   9662 		break;
   9663 	}
   9664 
   9665 	/* Enable/Disable EEE after link up */
   9666 	if (sc->sc_phytype > WMPHY_82579)
   9667 		wm_set_eee_pchlan(sc);
   9668 }
   9669 
   9670 /*
   9671  * wm_linkintr_tbi:
   9672  *
   9673  *	Helper; handle link interrupts for TBI mode.
   9674  */
   9675 static void
   9676 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9677 {
   9678 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9679 	uint32_t status;
   9680 
   9681 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9682 		__func__));
   9683 
   9684 	status = CSR_READ(sc, WMREG_STATUS);
   9685 	if (icr & ICR_LSC) {
   9686 		wm_check_for_link(sc);
   9687 		if (status & STATUS_LU) {
   9688 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9689 				device_xname(sc->sc_dev),
   9690 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9691 			/*
    9692 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    9693 			 * automatically, so re-read CTRL to refresh sc->sc_ctrl.
   9694 			 */
   9695 
   9696 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9697 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9698 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9699 			if (status & STATUS_FD)
   9700 				sc->sc_tctl |=
   9701 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9702 			else
   9703 				sc->sc_tctl |=
   9704 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9705 			if (sc->sc_ctrl & CTRL_TFCE)
   9706 				sc->sc_fcrtl |= FCRTL_XONE;
   9707 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9708 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9709 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9710 			sc->sc_tbi_linkup = 1;
   9711 			if_link_state_change(ifp, LINK_STATE_UP);
   9712 		} else {
   9713 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9714 				device_xname(sc->sc_dev)));
   9715 			sc->sc_tbi_linkup = 0;
   9716 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9717 		}
   9718 		/* Update LED */
   9719 		wm_tbi_serdes_set_linkled(sc);
   9720 	} else if (icr & ICR_RXSEQ)
   9721 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9722 			device_xname(sc->sc_dev)));
   9723 }
   9724 
   9725 /*
   9726  * wm_linkintr_serdes:
   9727  *
    9728  *	Helper; handle link interrupts for SERDES mode.
   9729  */
   9730 static void
   9731 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9732 {
   9733 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9734 	struct mii_data *mii = &sc->sc_mii;
   9735 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9736 	uint32_t pcs_adv, pcs_lpab, reg;
   9737 
   9738 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9739 		__func__));
   9740 
   9741 	if (icr & ICR_LSC) {
   9742 		/* Check PCS */
   9743 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9744 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9745 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9746 				device_xname(sc->sc_dev)));
   9747 			mii->mii_media_status |= IFM_ACTIVE;
   9748 			sc->sc_tbi_linkup = 1;
   9749 			if_link_state_change(ifp, LINK_STATE_UP);
   9750 		} else {
   9751 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9752 				device_xname(sc->sc_dev)));
   9753 			mii->mii_media_status |= IFM_NONE;
   9754 			sc->sc_tbi_linkup = 0;
   9755 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9756 			wm_tbi_serdes_set_linkled(sc);
   9757 			return;
   9758 		}
   9759 		mii->mii_media_active |= IFM_1000_SX;
   9760 		if ((reg & PCS_LSTS_FDX) != 0)
   9761 			mii->mii_media_active |= IFM_FDX;
   9762 		else
   9763 			mii->mii_media_active |= IFM_HDX;
   9764 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9765 			/* Check flow */
   9766 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9767 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9768 				DPRINTF(sc, WM_DEBUG_LINK,
   9769 				    ("XXX LINKOK but not ACOMP\n"));
   9770 				return;
   9771 			}
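			/*
			 * Resolve flow control from our advertised pause
			 * bits and the link partner's, per the IEEE 802.3
			 * Annex 28B resolution table: symmetric pause on
			 * both sides enables Tx and Rx pause; otherwise
			 * the asymmetric combinations select Tx-only or
			 * Rx-only pause.
			 */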
   9772 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9773 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9774 			DPRINTF(sc, WM_DEBUG_LINK,
   9775 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9776 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9777 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9778 				mii->mii_media_active |= IFM_FLOW
   9779 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9780 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9781 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9782 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9783 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9784 				mii->mii_media_active |= IFM_FLOW
   9785 				    | IFM_ETH_TXPAUSE;
   9786 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9787 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9788 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9789 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9790 				mii->mii_media_active |= IFM_FLOW
   9791 				    | IFM_ETH_RXPAUSE;
   9792 		}
   9793 		/* Update LED */
   9794 		wm_tbi_serdes_set_linkled(sc);
   9795 	} else
   9796 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9797 		    device_xname(sc->sc_dev)));
   9798 }
   9799 
   9800 /*
   9801  * wm_linkintr:
   9802  *
   9803  *	Helper; handle link interrupts.
   9804  */
   9805 static void
   9806 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9807 {
   9808 
   9809 	KASSERT(WM_CORE_LOCKED(sc));
   9810 
   9811 	if (sc->sc_flags & WM_F_HAS_MII)
   9812 		wm_linkintr_gmii(sc, icr);
   9813 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9814 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9815 		wm_linkintr_serdes(sc, icr);
   9816 	else
   9817 		wm_linkintr_tbi(sc, icr);
   9818 }
   9819 
   9820 
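/*
 * Defer Tx/Rx completion processing for a queue to either the queue
 * workqueue or the per-queue softint, according to the queue's
 * wmq_txrx_use_workqueue flag.
 */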
   9821 static inline void
   9822 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9823 {
   9824 
   9825 	if (wmq->wmq_txrx_use_workqueue)
   9826 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9827 	else
   9828 		softint_schedule(wmq->wmq_si);
   9829 }
   9830 
   9831 static inline void
   9832 wm_legacy_intr_disable(struct wm_softc *sc)
   9833 {
   9834 
   9835 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9836 }
   9837 
   9838 static inline void
   9839 wm_legacy_intr_enable(struct wm_softc *sc)
   9840 {
   9841 
   9842 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9843 }
   9844 
   9845 /*
   9846  * wm_intr_legacy:
   9847  *
   9848  *	Interrupt service routine for INTx and MSI.
   9849  */
   9850 static int
   9851 wm_intr_legacy(void *arg)
   9852 {
   9853 	struct wm_softc *sc = arg;
   9854 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9855 	struct wm_queue *wmq = &sc->sc_queue[0];
   9856 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9857 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9858 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9859 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9860 	uint32_t icr, rndval = 0;
   9861 	bool more = false;
   9862 
   9863 	icr = CSR_READ(sc, WMREG_ICR);
   9864 	if ((icr & sc->sc_icr) == 0)
   9865 		return 0;
   9866 
   9867 	DPRINTF(sc, WM_DEBUG_TX,
   9868 	    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9869 	if (rndval == 0)
   9870 		rndval = icr;
   9871 
   9872 	mutex_enter(rxq->rxq_lock);
   9873 
   9874 	if (rxq->rxq_stopping) {
   9875 		mutex_exit(rxq->rxq_lock);
   9876 		return 1;
   9877 	}
   9878 
   9879 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9880 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9881 		DPRINTF(sc, WM_DEBUG_RX,
   9882 		    ("%s: RX: got Rx intr 0x%08x\n",
   9883 			device_xname(sc->sc_dev),
   9884 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   9885 		WM_Q_EVCNT_INCR(rxq, intr);
   9886 	}
   9887 #endif
   9888 	/*
    9889 	 * wm_rxeof() does *not* call upper layer functions directly;
    9890 	 * if_percpuq_enqueue() merely calls softint_schedule(), so
    9891 	 * wm_rxeof() is safe to call in interrupt context.
   9892 	 */
   9893 	more = wm_rxeof(rxq, rxlimit);
   9894 
   9895 	mutex_exit(rxq->rxq_lock);
   9896 	mutex_enter(txq->txq_lock);
   9897 
   9898 	if (txq->txq_stopping) {
   9899 		mutex_exit(txq->txq_lock);
   9900 		return 1;
   9901 	}
   9902 
   9903 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9904 	if (icr & ICR_TXDW) {
   9905 		DPRINTF(sc, WM_DEBUG_TX,
   9906 		    ("%s: TX: got TXDW interrupt\n",
   9907 			device_xname(sc->sc_dev)));
   9908 		WM_Q_EVCNT_INCR(txq, txdw);
   9909 	}
   9910 #endif
   9911 	more |= wm_txeof(txq, txlimit);
   9912 	if (!IF_IS_EMPTY(&ifp->if_snd))
   9913 		more = true;
   9914 
   9915 	mutex_exit(txq->txq_lock);
   9916 	WM_CORE_LOCK(sc);
   9917 
   9918 	if (sc->sc_core_stopping) {
   9919 		WM_CORE_UNLOCK(sc);
   9920 		return 1;
   9921 	}
   9922 
   9923 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9924 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9925 		wm_linkintr(sc, icr);
   9926 	}
   9927 	if ((icr & ICR_GPI(0)) != 0)
   9928 		device_printf(sc->sc_dev, "got module interrupt\n");
   9929 
   9930 	WM_CORE_UNLOCK(sc);
   9931 
   9932 	if (icr & ICR_RXO) {
   9933 #if defined(WM_DEBUG)
   9934 		log(LOG_WARNING, "%s: Receive overrun\n",
   9935 		    device_xname(sc->sc_dev));
   9936 #endif /* defined(WM_DEBUG) */
   9937 	}
   9938 
   9939 	rnd_add_uint32(&sc->rnd_source, rndval);
   9940 
   9941 	if (more) {
   9942 		/* Try to get more packets going. */
   9943 		wm_legacy_intr_disable(sc);
   9944 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9945 		wm_sched_handle_queue(sc, wmq);
   9946 	}
   9947 
   9948 	return 1;
   9949 }
   9950 
   9951 static inline void
   9952 wm_txrxintr_disable(struct wm_queue *wmq)
   9953 {
   9954 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9955 
   9956 	if (__predict_false(!wm_is_using_msix(sc))) {
   9957 		return wm_legacy_intr_disable(sc);
   9958 	}
   9959 
   9960 	if (sc->sc_type == WM_T_82574)
   9961 		CSR_WRITE(sc, WMREG_IMC,
   9962 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9963 	else if (sc->sc_type == WM_T_82575)
   9964 		CSR_WRITE(sc, WMREG_EIMC,
   9965 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9966 	else
   9967 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9968 }
   9969 
   9970 static inline void
   9971 wm_txrxintr_enable(struct wm_queue *wmq)
   9972 {
   9973 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9974 
   9975 	wm_itrs_calculate(sc, wmq);
   9976 
   9977 	if (__predict_false(!wm_is_using_msix(sc))) {
   9978 		return wm_legacy_intr_enable(sc);
   9979 	}
   9980 
   9981 	/*
    9982 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9983 	 * here. It does not matter whether RXQ(0) or RXQ(1) re-enables
    9984 	 * ICR_OTHER first, because each queue's RXQ/TXQ interrupt stays
    9985 	 * disabled while its wm_handle_queue(wmq) is running.
   9986 	 */
   9987 	if (sc->sc_type == WM_T_82574)
   9988 		CSR_WRITE(sc, WMREG_IMS,
   9989 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9990 	else if (sc->sc_type == WM_T_82575)
   9991 		CSR_WRITE(sc, WMREG_EIMS,
   9992 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9993 	else
   9994 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9995 }
   9996 
   9997 static int
   9998 wm_txrxintr_msix(void *arg)
   9999 {
   10000 	struct wm_queue *wmq = arg;
   10001 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10002 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10003 	struct wm_softc *sc = txq->txq_sc;
   10004 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10005 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10006 	bool txmore;
   10007 	bool rxmore;
   10008 
   10009 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10010 
   10011 	DPRINTF(sc, WM_DEBUG_TX,
   10012 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10013 
   10014 	wm_txrxintr_disable(wmq);
   10015 
   10016 	mutex_enter(txq->txq_lock);
   10017 
   10018 	if (txq->txq_stopping) {
   10019 		mutex_exit(txq->txq_lock);
   10020 		return 1;
   10021 	}
   10022 
   10023 	WM_Q_EVCNT_INCR(txq, txdw);
   10024 	txmore = wm_txeof(txq, txlimit);
   10025 	/* wm_deferred start() is done in wm_handle_queue(). */
   10026 	mutex_exit(txq->txq_lock);
   10027 
   10028 	DPRINTF(sc, WM_DEBUG_RX,
   10029 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10030 	mutex_enter(rxq->rxq_lock);
   10031 
   10032 	if (rxq->rxq_stopping) {
   10033 		mutex_exit(rxq->rxq_lock);
   10034 		return 1;
   10035 	}
   10036 
   10037 	WM_Q_EVCNT_INCR(rxq, intr);
   10038 	rxmore = wm_rxeof(rxq, rxlimit);
   10039 	mutex_exit(rxq->rxq_lock);
   10040 
   10041 	wm_itrs_writereg(sc, wmq);
   10042 
   10043 	if (txmore || rxmore) {
   10044 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10045 		wm_sched_handle_queue(sc, wmq);
   10046 	} else
   10047 		wm_txrxintr_enable(wmq);
   10048 
   10049 	return 1;
   10050 }
   10051 
   10052 static void
   10053 wm_handle_queue(void *arg)
   10054 {
   10055 	struct wm_queue *wmq = arg;
   10056 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10057 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10058 	struct wm_softc *sc = txq->txq_sc;
   10059 	u_int txlimit = sc->sc_tx_process_limit;
   10060 	u_int rxlimit = sc->sc_rx_process_limit;
   10061 	bool txmore;
   10062 	bool rxmore;
   10063 
   10064 	mutex_enter(txq->txq_lock);
   10065 	if (txq->txq_stopping) {
   10066 		mutex_exit(txq->txq_lock);
   10067 		return;
   10068 	}
   10069 	txmore = wm_txeof(txq, txlimit);
   10070 	wm_deferred_start_locked(txq);
   10071 	mutex_exit(txq->txq_lock);
   10072 
   10073 	mutex_enter(rxq->rxq_lock);
   10074 	if (rxq->rxq_stopping) {
   10075 		mutex_exit(rxq->rxq_lock);
   10076 		return;
   10077 	}
   10078 	WM_Q_EVCNT_INCR(rxq, defer);
   10079 	rxmore = wm_rxeof(rxq, rxlimit);
   10080 	mutex_exit(rxq->rxq_lock);
   10081 
   10082 	if (txmore || rxmore) {
   10083 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10084 		wm_sched_handle_queue(sc, wmq);
   10085 	} else
   10086 		wm_txrxintr_enable(wmq);
   10087 }
   10088 
   10089 static void
   10090 wm_handle_queue_work(struct work *wk, void *context)
   10091 {
   10092 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10093 
   10094 	/*
    10095 	 * An "enqueued" flag is not required here.
   10096 	 */
   10097 	wm_handle_queue(wmq);
   10098 }
   10099 
   10100 /*
   10101  * wm_linkintr_msix:
   10102  *
   10103  *	Interrupt service routine for link status change for MSI-X.
   10104  */
   10105 static int
   10106 wm_linkintr_msix(void *arg)
   10107 {
   10108 	struct wm_softc *sc = arg;
   10109 	uint32_t reg;
   10110 	bool has_rxo;
   10111 
   10112 	reg = CSR_READ(sc, WMREG_ICR);
   10113 	WM_CORE_LOCK(sc);
   10114 	DPRINTF(sc, WM_DEBUG_LINK,
   10115 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10116 		device_xname(sc->sc_dev), reg));
   10117 
   10118 	if (sc->sc_core_stopping)
   10119 		goto out;
   10120 
   10121 	if ((reg & ICR_LSC) != 0) {
   10122 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10123 		wm_linkintr(sc, ICR_LSC);
   10124 	}
   10125 	if ((reg & ICR_GPI(0)) != 0)
   10126 		device_printf(sc->sc_dev, "got module interrupt\n");
   10127 
   10128 	/*
   10129 	 * XXX 82574 MSI-X mode workaround
   10130 	 *
    10131 	 * In MSI-X mode, the 82574 signals a receive overrun (RXO) on the
    10132 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    10133 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10134 	 * interrupts by writing WMREG_ICS, to process received packets.
   10135 	 */
   10136 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10137 #if defined(WM_DEBUG)
   10138 		log(LOG_WARNING, "%s: Receive overrun\n",
   10139 		    device_xname(sc->sc_dev));
   10140 #endif /* defined(WM_DEBUG) */
   10141 
   10142 		has_rxo = true;
   10143 		/*
    10144 		 * The RXO interrupt fires at a very high rate under heavy
    10145 		 * receive traffic, so handle ICR_OTHER in polling mode, as
    10146 		 * is done for the Tx/Rx interrupts. ICR_OTHER is re-enabled
    10147 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    10148 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10149 		 */
   10150 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10151 
   10152 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10153 	}
    10154 
   10157 out:
   10158 	WM_CORE_UNLOCK(sc);
   10159 
   10160 	if (sc->sc_type == WM_T_82574) {
   10161 		if (!has_rxo)
   10162 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10163 		else
   10164 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10165 	} else if (sc->sc_type == WM_T_82575)
   10166 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10167 	else
   10168 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10169 
   10170 	return 1;
   10171 }
   10172 
   10173 /*
   10174  * Media related.
   10175  * GMII, SGMII, TBI (and SERDES)
   10176  */
   10177 
   10178 /* Common */
   10179 
   10180 /*
   10181  * wm_tbi_serdes_set_linkled:
   10182  *
   10183  *	Update the link LED on TBI and SERDES devices.
   10184  */
   10185 static void
   10186 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10187 {
   10188 
   10189 	if (sc->sc_tbi_linkup)
   10190 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10191 	else
   10192 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10193 
   10194 	/* 82540 or newer devices are active low */
   10195 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10196 
   10197 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10198 }
   10199 
   10200 /* GMII related */
   10201 
   10202 /*
   10203  * wm_gmii_reset:
   10204  *
   10205  *	Reset the PHY.
   10206  */
   10207 static void
   10208 wm_gmii_reset(struct wm_softc *sc)
   10209 {
   10210 	uint32_t reg;
   10211 	int rv;
   10212 
   10213 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10214 		device_xname(sc->sc_dev), __func__));
   10215 
   10216 	rv = sc->phy.acquire(sc);
   10217 	if (rv != 0) {
   10218 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10219 		    __func__);
   10220 		return;
   10221 	}
   10222 
   10223 	switch (sc->sc_type) {
   10224 	case WM_T_82542_2_0:
   10225 	case WM_T_82542_2_1:
   10226 		/* null */
   10227 		break;
   10228 	case WM_T_82543:
   10229 		/*
   10230 		 * With 82543, we need to force speed and duplex on the MAC
   10231 		 * equal to what the PHY speed and duplex configuration is.
   10232 		 * In addition, we need to perform a hardware reset on the PHY
   10233 		 * to take it out of reset.
   10234 		 */
   10235 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10236 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10237 
   10238 		/* The PHY reset pin is active-low. */
   10239 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10240 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10241 		    CTRL_EXT_SWDPIN(4));
   10242 		reg |= CTRL_EXT_SWDPIO(4);
   10243 
   10244 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10245 		CSR_WRITE_FLUSH(sc);
   10246 		delay(10*1000);
   10247 
   10248 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10249 		CSR_WRITE_FLUSH(sc);
   10250 		delay(150);
   10251 #if 0
   10252 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10253 #endif
   10254 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10255 		break;
   10256 	case WM_T_82544:	/* Reset 10000us */
   10257 	case WM_T_82540:
   10258 	case WM_T_82545:
   10259 	case WM_T_82545_3:
   10260 	case WM_T_82546:
   10261 	case WM_T_82546_3:
   10262 	case WM_T_82541:
   10263 	case WM_T_82541_2:
   10264 	case WM_T_82547:
   10265 	case WM_T_82547_2:
   10266 	case WM_T_82571:	/* Reset 100us */
   10267 	case WM_T_82572:
   10268 	case WM_T_82573:
   10269 	case WM_T_82574:
   10270 	case WM_T_82575:
   10271 	case WM_T_82576:
   10272 	case WM_T_82580:
   10273 	case WM_T_I350:
   10274 	case WM_T_I354:
   10275 	case WM_T_I210:
   10276 	case WM_T_I211:
   10277 	case WM_T_82583:
   10278 	case WM_T_80003:
   10279 		/* Generic reset */
   10280 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10281 		CSR_WRITE_FLUSH(sc);
   10282 		delay(20000);
   10283 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10284 		CSR_WRITE_FLUSH(sc);
   10285 		delay(20000);
   10286 
   10287 		if ((sc->sc_type == WM_T_82541)
   10288 		    || (sc->sc_type == WM_T_82541_2)
   10289 		    || (sc->sc_type == WM_T_82547)
   10290 		    || (sc->sc_type == WM_T_82547_2)) {
    10291 			/* Workarounds for IGP are done in igp_reset() */
   10292 			/* XXX add code to set LED after phy reset */
   10293 		}
   10294 		break;
   10295 	case WM_T_ICH8:
   10296 	case WM_T_ICH9:
   10297 	case WM_T_ICH10:
   10298 	case WM_T_PCH:
   10299 	case WM_T_PCH2:
   10300 	case WM_T_PCH_LPT:
   10301 	case WM_T_PCH_SPT:
   10302 	case WM_T_PCH_CNP:
   10303 		/* Generic reset */
   10304 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10305 		CSR_WRITE_FLUSH(sc);
   10306 		delay(100);
   10307 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10308 		CSR_WRITE_FLUSH(sc);
   10309 		delay(150);
   10310 		break;
   10311 	default:
   10312 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10313 		    __func__);
   10314 		break;
   10315 	}
   10316 
   10317 	sc->phy.release(sc);
   10318 
   10319 	/* get_cfg_done */
   10320 	wm_get_cfg_done(sc);
   10321 
   10322 	/* Extra setup */
   10323 	switch (sc->sc_type) {
   10324 	case WM_T_82542_2_0:
   10325 	case WM_T_82542_2_1:
   10326 	case WM_T_82543:
   10327 	case WM_T_82544:
   10328 	case WM_T_82540:
   10329 	case WM_T_82545:
   10330 	case WM_T_82545_3:
   10331 	case WM_T_82546:
   10332 	case WM_T_82546_3:
   10333 	case WM_T_82541_2:
   10334 	case WM_T_82547_2:
   10335 	case WM_T_82571:
   10336 	case WM_T_82572:
   10337 	case WM_T_82573:
   10338 	case WM_T_82574:
   10339 	case WM_T_82583:
   10340 	case WM_T_82575:
   10341 	case WM_T_82576:
   10342 	case WM_T_82580:
   10343 	case WM_T_I350:
   10344 	case WM_T_I354:
   10345 	case WM_T_I210:
   10346 	case WM_T_I211:
   10347 	case WM_T_80003:
   10348 		/* Null */
   10349 		break;
   10350 	case WM_T_82541:
   10351 	case WM_T_82547:
    10352 		/* XXX Actively configure the LED after PHY reset */
   10353 		break;
   10354 	case WM_T_ICH8:
   10355 	case WM_T_ICH9:
   10356 	case WM_T_ICH10:
   10357 	case WM_T_PCH:
   10358 	case WM_T_PCH2:
   10359 	case WM_T_PCH_LPT:
   10360 	case WM_T_PCH_SPT:
   10361 	case WM_T_PCH_CNP:
   10362 		wm_phy_post_reset(sc);
   10363 		break;
   10364 	default:
   10365 		panic("%s: unknown type\n", __func__);
   10366 		break;
   10367 	}
   10368 }
   10369 
    10370 /*
    10371  * Set up sc_phytype and mii_{read|write}reg.
    10372  *
    10373  *  To identify the PHY type, the correct read/write functions must be
    10374  * selected first, and selecting them requires the PCI ID or the MAC
    10375  * type, since the PHY registers cannot be accessed yet.
    10376  *
    10377  *  On the first call of this function, the PHY ID is not known yet, so
    10378  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    10379  * perfect, so the result might be incorrect.
    10380  *
    10381  *  On the second call, the PHY OUI and model are used to identify the
    10382  * PHY type. This might still be imperfect because of missing table
    10383  * entries, but it is better than the first call.
    10384  *
    10385  *  If the newly detected type differs from the previous assumption, a
    10386  * diagnostic message is printed.
    10387  */
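/*
 * Call-sequence sketch (hedged: the first call happens at attach time,
 * outside this excerpt, presumably with a zero OUI and model):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	(before mii_attach)
 *	child = LIST_FIRST(&mii->mii_phys);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);		(after a PHY is found)
 */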
   10388 static void
   10389 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10390     uint16_t phy_model)
   10391 {
   10392 	device_t dev = sc->sc_dev;
   10393 	struct mii_data *mii = &sc->sc_mii;
   10394 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10395 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10396 	mii_readreg_t new_readreg;
   10397 	mii_writereg_t new_writereg;
   10398 	bool dodiag = true;
   10399 
   10400 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10401 		device_xname(sc->sc_dev), __func__));
   10402 
   10403 	/*
    10404 	 * A 1000BASE-T SFP uses SGMII, so the PHY type assumed on the first
    10405 	 * call is always incorrect; don't print diag output on the 2nd call.
   10406 	 */
   10407 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10408 		dodiag = false;
   10409 
   10410 	if (mii->mii_readreg == NULL) {
   10411 		/*
   10412 		 *  This is the first call of this function. For ICH and PCH
   10413 		 * variants, it's difficult to determine the PHY access method
   10414 		 * by sc_type, so use the PCI product ID for some devices.
   10415 		 */
   10416 
   10417 		switch (sc->sc_pcidevid) {
   10418 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10419 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10420 			/* 82577 */
   10421 			new_phytype = WMPHY_82577;
   10422 			break;
   10423 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10424 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10425 			/* 82578 */
   10426 			new_phytype = WMPHY_82578;
   10427 			break;
   10428 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10429 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10430 			/* 82579 */
   10431 			new_phytype = WMPHY_82579;
   10432 			break;
   10433 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10434 		case PCI_PRODUCT_INTEL_82801I_BM:
   10435 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10436 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10437 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10438 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10439 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10440 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10441 			/* ICH8, 9, 10 with 82567 */
   10442 			new_phytype = WMPHY_BM;
   10443 			break;
   10444 		default:
   10445 			break;
   10446 		}
   10447 	} else {
   10448 		/* It's not the first call. Use PHY OUI and model */
   10449 		switch (phy_oui) {
   10450 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10451 			switch (phy_model) {
   10452 			case 0x0004: /* XXX */
   10453 				new_phytype = WMPHY_82578;
   10454 				break;
   10455 			default:
   10456 				break;
   10457 			}
   10458 			break;
   10459 		case MII_OUI_xxMARVELL:
   10460 			switch (phy_model) {
   10461 			case MII_MODEL_xxMARVELL_I210:
   10462 				new_phytype = WMPHY_I210;
   10463 				break;
   10464 			case MII_MODEL_xxMARVELL_E1011:
   10465 			case MII_MODEL_xxMARVELL_E1000_3:
   10466 			case MII_MODEL_xxMARVELL_E1000_5:
   10467 			case MII_MODEL_xxMARVELL_E1112:
   10468 				new_phytype = WMPHY_M88;
   10469 				break;
   10470 			case MII_MODEL_xxMARVELL_E1149:
   10471 				new_phytype = WMPHY_BM;
   10472 				break;
   10473 			case MII_MODEL_xxMARVELL_E1111:
   10474 			case MII_MODEL_xxMARVELL_I347:
   10475 			case MII_MODEL_xxMARVELL_E1512:
   10476 			case MII_MODEL_xxMARVELL_E1340M:
   10477 			case MII_MODEL_xxMARVELL_E1543:
   10478 				new_phytype = WMPHY_M88;
   10479 				break;
   10480 			case MII_MODEL_xxMARVELL_I82563:
   10481 				new_phytype = WMPHY_GG82563;
   10482 				break;
   10483 			default:
   10484 				break;
   10485 			}
   10486 			break;
   10487 		case MII_OUI_INTEL:
   10488 			switch (phy_model) {
   10489 			case MII_MODEL_INTEL_I82577:
   10490 				new_phytype = WMPHY_82577;
   10491 				break;
   10492 			case MII_MODEL_INTEL_I82579:
   10493 				new_phytype = WMPHY_82579;
   10494 				break;
   10495 			case MII_MODEL_INTEL_I217:
   10496 				new_phytype = WMPHY_I217;
   10497 				break;
   10498 			case MII_MODEL_INTEL_I82580:
   10499 				new_phytype = WMPHY_82580;
   10500 				break;
   10501 			case MII_MODEL_INTEL_I350:
   10502 				new_phytype = WMPHY_I350;
    10503 				break;
   10505 			default:
   10506 				break;
   10507 			}
   10508 			break;
   10509 		case MII_OUI_yyINTEL:
   10510 			switch (phy_model) {
   10511 			case MII_MODEL_yyINTEL_I82562G:
   10512 			case MII_MODEL_yyINTEL_I82562EM:
   10513 			case MII_MODEL_yyINTEL_I82562ET:
   10514 				new_phytype = WMPHY_IFE;
   10515 				break;
   10516 			case MII_MODEL_yyINTEL_IGP01E1000:
   10517 				new_phytype = WMPHY_IGP;
   10518 				break;
   10519 			case MII_MODEL_yyINTEL_I82566:
   10520 				new_phytype = WMPHY_IGP_3;
   10521 				break;
   10522 			default:
   10523 				break;
   10524 			}
   10525 			break;
   10526 		default:
   10527 			break;
   10528 		}
   10529 
   10530 		if (dodiag) {
   10531 			if (new_phytype == WMPHY_UNKNOWN)
   10532 				aprint_verbose_dev(dev,
   10533 				    "%s: Unknown PHY model. OUI=%06x, "
   10534 				    "model=%04x\n", __func__, phy_oui,
   10535 				    phy_model);
   10536 
   10537 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10538 			    && (sc->sc_phytype != new_phytype)) {
   10539 				aprint_error_dev(dev, "Previously assumed PHY "
    10540 				    "type (%u) was incorrect. PHY type from "
    10541 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   10542 			}
   10543 		}
   10544 	}
   10545 
   10546 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10547 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10548 		/* SGMII */
   10549 		new_readreg = wm_sgmii_readreg;
   10550 		new_writereg = wm_sgmii_writereg;
   10551 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10552 		/* BM2 (phyaddr == 1) */
   10553 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10554 		    && (new_phytype != WMPHY_BM)
   10555 		    && (new_phytype != WMPHY_UNKNOWN))
   10556 			doubt_phytype = new_phytype;
   10557 		new_phytype = WMPHY_BM;
   10558 		new_readreg = wm_gmii_bm_readreg;
   10559 		new_writereg = wm_gmii_bm_writereg;
   10560 	} else if (sc->sc_type >= WM_T_PCH) {
   10561 		/* All PCH* use _hv_ */
   10562 		new_readreg = wm_gmii_hv_readreg;
   10563 		new_writereg = wm_gmii_hv_writereg;
   10564 	} else if (sc->sc_type >= WM_T_ICH8) {
   10565 		/* non-82567 ICH8, 9 and 10 */
   10566 		new_readreg = wm_gmii_i82544_readreg;
   10567 		new_writereg = wm_gmii_i82544_writereg;
   10568 	} else if (sc->sc_type >= WM_T_80003) {
   10569 		/* 80003 */
   10570 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10571 		    && (new_phytype != WMPHY_GG82563)
   10572 		    && (new_phytype != WMPHY_UNKNOWN))
   10573 			doubt_phytype = new_phytype;
   10574 		new_phytype = WMPHY_GG82563;
   10575 		new_readreg = wm_gmii_i80003_readreg;
   10576 		new_writereg = wm_gmii_i80003_writereg;
   10577 	} else if (sc->sc_type >= WM_T_I210) {
   10578 		/* I210 and I211 */
   10579 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10580 		    && (new_phytype != WMPHY_I210)
   10581 		    && (new_phytype != WMPHY_UNKNOWN))
   10582 			doubt_phytype = new_phytype;
   10583 		new_phytype = WMPHY_I210;
   10584 		new_readreg = wm_gmii_gs40g_readreg;
   10585 		new_writereg = wm_gmii_gs40g_writereg;
   10586 	} else if (sc->sc_type >= WM_T_82580) {
   10587 		/* 82580, I350 and I354 */
   10588 		new_readreg = wm_gmii_82580_readreg;
   10589 		new_writereg = wm_gmii_82580_writereg;
   10590 	} else if (sc->sc_type >= WM_T_82544) {
    10591 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10592 		new_readreg = wm_gmii_i82544_readreg;
   10593 		new_writereg = wm_gmii_i82544_writereg;
   10594 	} else {
   10595 		new_readreg = wm_gmii_i82543_readreg;
   10596 		new_writereg = wm_gmii_i82543_writereg;
   10597 	}
   10598 
   10599 	if (new_phytype == WMPHY_BM) {
   10600 		/* All BM use _bm_ */
   10601 		new_readreg = wm_gmii_bm_readreg;
   10602 		new_writereg = wm_gmii_bm_writereg;
   10603 	}
   10604 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10605 		/* All PCH* use _hv_ */
   10606 		new_readreg = wm_gmii_hv_readreg;
   10607 		new_writereg = wm_gmii_hv_writereg;
   10608 	}
   10609 
   10610 	/* Diag output */
   10611 	if (dodiag) {
   10612 		if (doubt_phytype != WMPHY_UNKNOWN)
   10613 			aprint_error_dev(dev, "Assumed new PHY type was "
   10614 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10615 			    new_phytype);
   10616 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10617 		    && (sc->sc_phytype != new_phytype))
    10618 			aprint_error_dev(dev, "Previously assumed PHY type "
    10619 			    "(%u) was incorrect. New PHY type = %u\n",
   10620 			    sc->sc_phytype, new_phytype);
   10621 
   10622 		if ((mii->mii_readreg != NULL) &&
   10623 		    (new_phytype == WMPHY_UNKNOWN))
   10624 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10625 
   10626 		if ((mii->mii_readreg != NULL) &&
   10627 		    (mii->mii_readreg != new_readreg))
   10628 			aprint_error_dev(dev, "Previously assumed PHY "
   10629 			    "read/write function was incorrect.\n");
   10630 	}
   10631 
   10632 	/* Update now */
   10633 	sc->sc_phytype = new_phytype;
   10634 	mii->mii_readreg = new_readreg;
   10635 	mii->mii_writereg = new_writereg;
   10636 	if (new_readreg == wm_gmii_hv_readreg) {
   10637 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10638 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10639 	} else if (new_readreg == wm_sgmii_readreg) {
   10640 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10641 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10642 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10643 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10644 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10645 	}
   10646 }
   10647 
   10648 /*
   10649  * wm_get_phy_id_82575:
   10650  *
   10651  * Return PHY ID. Return -1 if it failed.
   10652  */
   10653 static int
   10654 wm_get_phy_id_82575(struct wm_softc *sc)
   10655 {
   10656 	uint32_t reg;
   10657 	int phyid = -1;
   10658 
   10659 	/* XXX */
   10660 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10661 		return -1;
   10662 
   10663 	if (wm_sgmii_uses_mdio(sc)) {
   10664 		switch (sc->sc_type) {
   10665 		case WM_T_82575:
   10666 		case WM_T_82576:
   10667 			reg = CSR_READ(sc, WMREG_MDIC);
   10668 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10669 			break;
   10670 		case WM_T_82580:
   10671 		case WM_T_I350:
   10672 		case WM_T_I354:
   10673 		case WM_T_I210:
   10674 		case WM_T_I211:
   10675 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10676 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10677 			break;
   10678 		default:
   10679 			return -1;
   10680 		}
   10681 	}
   10682 
   10683 	return phyid;
   10684 }
   10685 
   10686 /*
   10687  * wm_gmii_mediainit:
   10688  *
   10689  *	Initialize media for use on 1000BASE-T devices.
   10690  */
   10691 static void
   10692 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10693 {
   10694 	device_t dev = sc->sc_dev;
   10695 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10696 	struct mii_data *mii = &sc->sc_mii;
   10697 
   10698 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10699 		device_xname(sc->sc_dev), __func__));
   10700 
   10701 	/* We have GMII. */
   10702 	sc->sc_flags |= WM_F_HAS_MII;
   10703 
   10704 	if (sc->sc_type == WM_T_80003)
   10705 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10706 	else
   10707 		sc->sc_tipg = TIPG_1000T_DFLT;
   10708 
   10709 	/*
   10710 	 * Let the chip set speed/duplex on its own based on
   10711 	 * signals from the PHY.
   10712 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10713 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10714 	 */
   10715 	sc->sc_ctrl |= CTRL_SLU;
   10716 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10717 
   10718 	/* Initialize our media structures and probe the GMII. */
   10719 	mii->mii_ifp = ifp;
   10720 
   10721 	mii->mii_statchg = wm_gmii_statchg;
   10722 
    10723 	/* Move PHY control from SMBus to PCIe */
   10724 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10725 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10726 	    || (sc->sc_type == WM_T_PCH_CNP))
   10727 		wm_init_phy_workarounds_pchlan(sc);
   10728 
   10729 	wm_gmii_reset(sc);
   10730 
   10731 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10732 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10733 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10734 
   10735 	/* Setup internal SGMII PHY for SFP */
   10736 	wm_sgmii_sfp_preconfig(sc);
   10737 
   10738 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10739 	    || (sc->sc_type == WM_T_82580)
   10740 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10741 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10742 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10743 			/* Attach only one port */
   10744 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10745 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10746 		} else {
   10747 			int i, id;
   10748 			uint32_t ctrl_ext;
   10749 
   10750 			id = wm_get_phy_id_82575(sc);
   10751 			if (id != -1) {
   10752 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10753 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10754 			}
   10755 			if ((id == -1)
   10756 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10757 				/* Power on the SGMII PHY if it is disabled */
   10758 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10759 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10760 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10761 				CSR_WRITE_FLUSH(sc);
   10762 				delay(300*1000); /* XXX too long */
   10763 
   10764 				/*
    10765 				 * Scan PHY addresses 1 through 7.
    10766 				 *
    10767 				 * A failed I2C access sets the ERROR bit in
    10768 				 * the I2C register, so suppress the error
    10769 				 * message while scanning.
   10770 				 */
   10771 				sc->phy.no_errprint = true;
   10772 				for (i = 1; i < 8; i++)
   10773 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10774 					    0xffffffff, i, MII_OFFSET_ANY,
   10775 					    MIIF_DOPAUSE);
   10776 				sc->phy.no_errprint = false;
   10777 
   10778 				/* Restore previous sfp cage power state */
   10779 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10780 			}
   10781 		}
   10782 	} else
   10783 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10784 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10785 
   10786 	/*
    10787 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
   10788 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10789 	 */
   10790 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10791 		|| (sc->sc_type == WM_T_PCH_SPT)
   10792 		|| (sc->sc_type == WM_T_PCH_CNP))
   10793 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10794 		wm_set_mdio_slow_mode_hv(sc);
   10795 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10796 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10797 	}
   10798 
   10799 	/*
   10800 	 * (For ICH8 variants)
   10801 	 * If PHY detection failed, use BM's r/w function and retry.
   10802 	 */
   10803 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10804 		/* if failed, retry with *_bm_* */
   10805 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10806 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10807 		    sc->sc_phytype);
   10808 		sc->sc_phytype = WMPHY_BM;
   10809 		mii->mii_readreg = wm_gmii_bm_readreg;
   10810 		mii->mii_writereg = wm_gmii_bm_writereg;
   10811 
   10812 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10813 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10814 	}
   10815 
   10816 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10817 		/* No PHY was found */
   10818 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10819 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10820 		sc->sc_phytype = WMPHY_NONE;
   10821 	} else {
   10822 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10823 
   10824 		/*
    10825 		 * PHY found! Check the PHY type again with this second
    10826 		 * call of wm_gmii_setup_phytype().
   10827 		 */
   10828 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10829 		    child->mii_mpd_model);
   10830 
   10831 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10832 	}
   10833 }
   10834 
   10835 /*
   10836  * wm_gmii_mediachange:	[ifmedia interface function]
   10837  *
   10838  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10839  */
   10840 static int
   10841 wm_gmii_mediachange(struct ifnet *ifp)
   10842 {
   10843 	struct wm_softc *sc = ifp->if_softc;
   10844 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10845 	uint32_t reg;
   10846 	int rc;
   10847 
   10848 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10849 		device_xname(sc->sc_dev), __func__));
   10850 	if ((ifp->if_flags & IFF_UP) == 0)
   10851 		return 0;
   10852 
   10853 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10854 	if ((sc->sc_type == WM_T_82580)
   10855 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10856 	    || (sc->sc_type == WM_T_I211)) {
   10857 		reg = CSR_READ(sc, WMREG_PHPM);
   10858 		reg &= ~PHPM_GO_LINK_D;
   10859 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10860 	}
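	/*
	 * (PHPM_GO_LINK_D is presumably the "go link disconnect" power
	 * state bit; clearing it lets the PHY try to establish a link.)
	 */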
   10861 
   10862 	/* Disable D0 LPLU. */
   10863 	wm_lplu_d0_disable(sc);
   10864 
   10865 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10866 	sc->sc_ctrl |= CTRL_SLU;
   10867 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10868 	    || (sc->sc_type > WM_T_82543)) {
   10869 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10870 	} else {
   10871 		sc->sc_ctrl &= ~CTRL_ASDE;
   10872 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10873 		if (ife->ifm_media & IFM_FDX)
   10874 			sc->sc_ctrl |= CTRL_FD;
   10875 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10876 		case IFM_10_T:
   10877 			sc->sc_ctrl |= CTRL_SPEED_10;
   10878 			break;
   10879 		case IFM_100_TX:
   10880 			sc->sc_ctrl |= CTRL_SPEED_100;
   10881 			break;
   10882 		case IFM_1000_T:
   10883 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10884 			break;
   10885 		case IFM_NONE:
   10886 			/* There is no specific setting for IFM_NONE */
   10887 			break;
   10888 		default:
   10889 			panic("wm_gmii_mediachange: bad media 0x%x",
   10890 			    ife->ifm_media);
   10891 		}
   10892 	}
   10893 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10894 	CSR_WRITE_FLUSH(sc);
   10895 
   10896 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10897 		wm_serdes_mediachange(ifp);
   10898 
   10899 	if (sc->sc_type <= WM_T_82543)
   10900 		wm_gmii_reset(sc);
   10901 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10902 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10903 		/* Allow time for the SFP cage to power up the PHY */
   10904 		delay(300 * 1000);
   10905 		wm_gmii_reset(sc);
   10906 	}
   10907 
   10908 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10909 		return 0;
   10910 	return rc;
   10911 }
   10912 
   10913 /*
   10914  * wm_gmii_mediastatus:	[ifmedia interface function]
   10915  *
   10916  *	Get the current interface media status on a 1000BASE-T device.
   10917  */
   10918 static void
   10919 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10920 {
   10921 	struct wm_softc *sc = ifp->if_softc;
   10922 
   10923 	ether_mediastatus(ifp, ifmr);
   10924 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10925 	    | sc->sc_flowflags;
   10926 }
   10927 
   10928 #define	MDI_IO		CTRL_SWDPIN(2)
   10929 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10930 #define	MDI_CLK		CTRL_SWDPIN(3)
   10931 
   10932 static void
   10933 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10934 {
   10935 	uint32_t i, v;
   10936 
   10937 	v = CSR_READ(sc, WMREG_CTRL);
   10938 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10939 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10940 
   10941 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10942 		if (data & i)
   10943 			v |= MDI_IO;
   10944 		else
   10945 			v &= ~MDI_IO;
   10946 		CSR_WRITE(sc, WMREG_CTRL, v);
   10947 		CSR_WRITE_FLUSH(sc);
   10948 		delay(10);
   10949 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10950 		CSR_WRITE_FLUSH(sc);
   10951 		delay(10);
   10952 		CSR_WRITE(sc, WMREG_CTRL, v);
   10953 		CSR_WRITE_FLUSH(sc);
   10954 		delay(10);
   10955 	}
   10956 }
   10957 
   10958 static uint16_t
   10959 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10960 {
   10961 	uint32_t v, i;
   10962 	uint16_t data = 0;
   10963 
   10964 	v = CSR_READ(sc, WMREG_CTRL);
   10965 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10966 	v |= CTRL_SWDPIO(3);
   10967 
   10968 	CSR_WRITE(sc, WMREG_CTRL, v);
   10969 	CSR_WRITE_FLUSH(sc);
   10970 	delay(10);
   10971 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10972 	CSR_WRITE_FLUSH(sc);
   10973 	delay(10);
   10974 	CSR_WRITE(sc, WMREG_CTRL, v);
   10975 	CSR_WRITE_FLUSH(sc);
   10976 	delay(10);
   10977 
   10978 	for (i = 0; i < 16; i++) {
   10979 		data <<= 1;
   10980 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10981 		CSR_WRITE_FLUSH(sc);
   10982 		delay(10);
   10983 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10984 			data |= 1;
   10985 		CSR_WRITE(sc, WMREG_CTRL, v);
   10986 		CSR_WRITE_FLUSH(sc);
   10987 		delay(10);
   10988 	}
   10989 
   10990 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10991 	CSR_WRITE_FLUSH(sc);
   10992 	delay(10);
   10993 	CSR_WRITE(sc, WMREG_CTRL, v);
   10994 	CSR_WRITE_FLUSH(sc);
   10995 	delay(10);
   10996 
   10997 	return data;
   10998 }
   10999 
   11000 #undef MDI_IO
   11001 #undef MDI_DIR
   11002 #undef MDI_CLK
   11003 
   11004 /*
   11005  * wm_gmii_i82543_readreg:	[mii interface function]
   11006  *
   11007  *	Read a PHY register on the GMII (i82543 version).
   11008  */
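/*
 * Illustrative sketch (a reading of the code below, not a datasheet
 * quote) of the bit-banged IEEE 802.3 clause 22 management frame:
 *
 *	<32x 1 preamble> <start 01> <op 10=read> <phy[4:0]> <reg[4:0]>
 *	<turnaround> <data[15:0] from the PHY>
 *
 * wm_i82543_mii_sendbits() shifts the 14 header bits out MSB-first on
 * SWDPIN(2) while clocking SWDPIN(3); wm_i82543_mii_recvbits() then
 * clocks the 16 data bits back in.
 */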
   11009 static int
   11010 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11011 {
   11012 	struct wm_softc *sc = device_private(dev);
   11013 
   11014 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11015 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11016 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11017 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11018 
   11019 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11020 		device_xname(dev), phy, reg, *val));
   11021 
   11022 	return 0;
   11023 }
   11024 
   11025 /*
   11026  * wm_gmii_i82543_writereg:	[mii interface function]
   11027  *
   11028  *	Write a PHY register on the GMII (i82543 version).
   11029  */
   11030 static int
   11031 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11032 {
   11033 	struct wm_softc *sc = device_private(dev);
   11034 
   11035 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11036 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11037 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11038 	    (MII_COMMAND_START << 30), 32);
   11039 
   11040 	return 0;
   11041 }
   11042 
   11043 /*
   11044  * wm_gmii_mdic_readreg:	[mii interface function]
   11045  *
   11046  *	Read a PHY register on the GMII.
   11047  */
   11048 static int
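/*
 * A sketch of the MDIC handshake implemented below (an observation of
 * this code, not a datasheet quote): write a command word combining
 * MDIC_OP_READ with the PHY and register addresses, poll MDIC_READY up
 * to WM_GEN_POLL_TIMEOUT * 3 times at 50us intervals, then check
 * MDIC_E for a transaction error before extracting MDIC_DATA().
 */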
   11049 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11050 {
   11051 	struct wm_softc *sc = device_private(dev);
   11052 	uint32_t mdic = 0;
   11053 	int i;
   11054 
   11055 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11056 	    && (reg > MII_ADDRMASK)) {
   11057 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11058 		    __func__, sc->sc_phytype, reg);
   11059 		reg &= MII_ADDRMASK;
   11060 	}
   11061 
   11062 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11063 	    MDIC_REGADD(reg));
   11064 
   11065 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11066 		delay(50);
   11067 		mdic = CSR_READ(sc, WMREG_MDIC);
   11068 		if (mdic & MDIC_READY)
   11069 			break;
   11070 	}
   11071 
   11072 	if ((mdic & MDIC_READY) == 0) {
   11073 		DPRINTF(sc, WM_DEBUG_GMII,
   11074 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11075 			device_xname(dev), phy, reg));
   11076 		return ETIMEDOUT;
   11077 	} else if (mdic & MDIC_E) {
   11078 		/* This is normal if no PHY is present. */
   11079 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11080 			device_xname(sc->sc_dev), phy, reg));
   11081 		return -1;
   11082 	} else
   11083 		*val = MDIC_DATA(mdic);
   11084 
   11085 	/*
   11086 	 * Allow some time after each MDIC transaction to avoid
   11087 	 * reading duplicate data in the next MDIC transaction.
   11088 	 */
   11089 	if (sc->sc_type == WM_T_PCH2)
   11090 		delay(100);
   11091 
   11092 	return 0;
   11093 }
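
          /*
           * Illustrative use of the MDIC accessors above (a sketch only;
           * wm_mdic_example() is a hypothetical name, not part of the
           * driver): read the two PHY identifier registers of the PHY at
           * address 1 and report them.
           */
          #if 0	/* example sketch, not compiled */
          static void
          wm_mdic_example(device_t dev)
          {
          	uint16_t id1, id2;

          	if (wm_gmii_mdic_readreg(dev, 1, MII_PHYIDR1, &id1) == 0 &&
          	    wm_gmii_mdic_readreg(dev, 1, MII_PHYIDR2, &id2) == 0)
          		aprint_normal_dev(dev, "PHY ID 0x%04x 0x%04x\n",
          		    id1, id2);
          }
          #endif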
   11094 
   11095 /*
   11096  * wm_gmii_mdic_writereg:	[mii interface function]
   11097  *
   11098  *	Write a PHY register on the GMII.
   11099  */
   11100 static int
   11101 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11102 {
   11103 	struct wm_softc *sc = device_private(dev);
   11104 	uint32_t mdic = 0;
   11105 	int i;
   11106 
   11107 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11108 	    && (reg > MII_ADDRMASK)) {
   11109 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11110 		    __func__, sc->sc_phytype, reg);
   11111 		reg &= MII_ADDRMASK;
   11112 	}
   11113 
   11114 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11115 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11116 
   11117 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11118 		delay(50);
   11119 		mdic = CSR_READ(sc, WMREG_MDIC);
   11120 		if (mdic & MDIC_READY)
   11121 			break;
   11122 	}
   11123 
   11124 	if ((mdic & MDIC_READY) == 0) {
   11125 		DPRINTF(sc, WM_DEBUG_GMII,
   11126 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11127 			device_xname(dev), phy, reg));
   11128 		return ETIMEDOUT;
   11129 	} else if (mdic & MDIC_E) {
   11130 		DPRINTF(sc, WM_DEBUG_GMII,
   11131 		    ("%s: MDIC write error: phy %d reg %d\n",
   11132 			device_xname(dev), phy, reg));
   11133 		return -1;
   11134 	}
   11135 
   11136 	/*
   11137 	 * Allow some time after each MDIC transaction to avoid
   11138 	 * reading duplicate data in the next MDIC transaction.
   11139 	 */
   11140 	if (sc->sc_type == WM_T_PCH2)
   11141 		delay(100);
   11142 
   11143 	return 0;
   11144 }
   11145 
   11146 /*
   11147  * wm_gmii_i82544_readreg:	[mii interface function]
   11148  *
   11149  *	Read a PHY register on the GMII.
   11150  */
   11151 static int
   11152 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11153 {
   11154 	struct wm_softc *sc = device_private(dev);
   11155 	int rv;
   11156 
   11157 	if (sc->phy.acquire(sc)) {
   11158 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11159 		return -1;
   11160 	}
   11161 
   11162 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11163 
   11164 	sc->phy.release(sc);
   11165 
   11166 	return rv;
   11167 }
   11168 
   11169 static int
   11170 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11171 {
   11172 	struct wm_softc *sc = device_private(dev);
   11173 	int rv;
   11174 
   11175 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11176 		switch (sc->sc_phytype) {
   11177 		case WMPHY_IGP:
   11178 		case WMPHY_IGP_2:
   11179 		case WMPHY_IGP_3:
   11180 			rv = wm_gmii_mdic_writereg(dev, phy,
   11181 			    IGPHY_PAGE_SELECT, reg);
   11182 			if (rv != 0)
   11183 				return rv;
   11184 			break;
   11185 		default:
   11186 #ifdef WM_DEBUG
   11187 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11188 			    __func__, sc->sc_phytype, reg);
   11189 #endif
   11190 			break;
   11191 		}
   11192 	}
   11193 
   11194 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11195 }
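
          /*
           * A note on the paging convention assumed by the i82544 helpers
           * above: for IGP PHYs, callers encode the page in the bits of
           * "reg" above the 5-bit MII address, so reg > MII_ADDRMASK marks
           * a paged access.  The full value is written to
           * IGPHY_PAGE_SELECT first, and the masked 5-bit address is then
           * used for the actual MDIC transaction.
           */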
   11196 
   11197 /*
   11198  * wm_gmii_i82544_writereg:	[mii interface function]
   11199  *
   11200  *	Write a PHY register on the GMII.
   11201  */
   11202 static int
   11203 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11204 {
   11205 	struct wm_softc *sc = device_private(dev);
   11206 	int rv;
   11207 
   11208 	if (sc->phy.acquire(sc)) {
   11209 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11210 		return -1;
   11211 	}
   11212 
   11213 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11214 	sc->phy.release(sc);
   11215 
   11216 	return rv;
   11217 }
   11218 
   11219 static int
   11220 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11221 {
   11222 	struct wm_softc *sc = device_private(dev);
   11223 	int rv;
   11224 
   11225 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11226 		switch (sc->sc_phytype) {
   11227 		case WMPHY_IGP:
   11228 		case WMPHY_IGP_2:
   11229 		case WMPHY_IGP_3:
   11230 			rv = wm_gmii_mdic_writereg(dev, phy,
   11231 			    IGPHY_PAGE_SELECT, reg);
   11232 			if (rv != 0)
   11233 				return rv;
   11234 			break;
   11235 		default:
   11236 #ifdef WM_DEBUG
    11237 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11238 			    __func__, sc->sc_phytype, reg);
   11239 #endif
   11240 			break;
   11241 		}
   11242 	}
   11243 
   11244 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11245 }
   11246 
   11247 /*
   11248  * wm_gmii_i80003_readreg:	[mii interface function]
   11249  *
    11250  *	Read a PHY register on the Kumeran bus (i80003 version).
   11251  * This could be handled by the PHY layer if we didn't have to lock the
   11252  * resource ...
   11253  */
   11254 static int
   11255 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11256 {
   11257 	struct wm_softc *sc = device_private(dev);
   11258 	int page_select;
   11259 	uint16_t temp, temp2;
   11260 	int rv = 0;
   11261 
    11262 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   11263 		return -1;
   11264 
   11265 	if (sc->phy.acquire(sc)) {
   11266 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11267 		return -1;
   11268 	}
   11269 
   11270 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11271 		page_select = GG82563_PHY_PAGE_SELECT;
   11272 	else {
   11273 		/*
   11274 		 * Use Alternative Page Select register to access registers
   11275 		 * 30 and 31.
   11276 		 */
   11277 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11278 	}
   11279 	temp = reg >> GG82563_PAGE_SHIFT;
   11280 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11281 		goto out;
   11282 
   11283 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11284 		/*
    11285 		 * Wait an additional 200us to work around a bug in the
    11286 		 * ready bit of the MDIC register.
   11287 		 */
   11288 		delay(200);
   11289 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11290 		if ((rv != 0) || (temp2 != temp)) {
   11291 			device_printf(dev, "%s failed\n", __func__);
   11292 			rv = -1;
   11293 			goto out;
   11294 		}
   11295 		delay(200);
   11296 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11297 		delay(200);
   11298 	} else
   11299 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11300 
   11301 out:
   11302 	sc->phy.release(sc);
   11303 	return rv;
   11304 }
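
          /*
           * A sketch of the GG82563 paging used above: callers pass
           * reg = (page << GG82563_PAGE_SHIFT) | addr.  The page number is
           * written to the page-select register first; the alternate
           * page-select register is used when the target address is 30 or
           * 31 (GG82563_MIN_ALT_REG and up).
           */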
   11305 
   11306 /*
   11307  * wm_gmii_i80003_writereg:	[mii interface function]
   11308  *
    11309  *	Write a PHY register on the Kumeran bus (i80003 version).
   11310  * This could be handled by the PHY layer if we didn't have to lock the
   11311  * resource ...
   11312  */
   11313 static int
   11314 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11315 {
   11316 	struct wm_softc *sc = device_private(dev);
   11317 	int page_select, rv;
   11318 	uint16_t temp, temp2;
   11319 
    11320 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   11321 		return -1;
   11322 
   11323 	if (sc->phy.acquire(sc)) {
   11324 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11325 		return -1;
   11326 	}
   11327 
   11328 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11329 		page_select = GG82563_PHY_PAGE_SELECT;
   11330 	else {
   11331 		/*
   11332 		 * Use Alternative Page Select register to access registers
   11333 		 * 30 and 31.
   11334 		 */
   11335 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11336 	}
   11337 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11338 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11339 		goto out;
   11340 
   11341 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11342 		/*
    11343 		 * Wait an additional 200us to work around a bug in the
    11344 		 * ready bit of the MDIC register.
   11345 		 */
   11346 		delay(200);
   11347 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11348 		if ((rv != 0) || (temp2 != temp)) {
   11349 			device_printf(dev, "%s failed\n", __func__);
   11350 			rv = -1;
   11351 			goto out;
   11352 		}
   11353 		delay(200);
   11354 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11355 		delay(200);
   11356 	} else
   11357 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11358 
   11359 out:
   11360 	sc->phy.release(sc);
   11361 	return rv;
   11362 }
   11363 
   11364 /*
   11365  * wm_gmii_bm_readreg:	[mii interface function]
   11366  *
    11367  *	Read a PHY register on the BM PHY.
   11368  * This could be handled by the PHY layer if we didn't have to lock the
   11369  * resource ...
   11370  */
   11371 static int
   11372 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11373 {
   11374 	struct wm_softc *sc = device_private(dev);
   11375 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11376 	int rv;
   11377 
   11378 	if (sc->phy.acquire(sc)) {
   11379 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11380 		return -1;
   11381 	}
   11382 
   11383 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11384 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11385 		    || (reg == 31)) ? 1 : phy;
   11386 	/* Page 800 works differently than the rest so it has its own func */
   11387 	if (page == BM_WUC_PAGE) {
   11388 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11389 		goto release;
   11390 	}
   11391 
   11392 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11393 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11394 		    && (sc->sc_type != WM_T_82583))
   11395 			rv = wm_gmii_mdic_writereg(dev, phy,
   11396 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11397 		else
   11398 			rv = wm_gmii_mdic_writereg(dev, phy,
   11399 			    BME1000_PHY_PAGE_SELECT, page);
   11400 		if (rv != 0)
   11401 			goto release;
   11402 	}
   11403 
   11404 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11405 
   11406 release:
   11407 	sc->phy.release(sc);
   11408 	return rv;
   11409 }
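
          /*
           * A note on the PHY address fixup above: on BM PHYs other than
           * the 82574/82583, pages 768 and up, register 25 of page 0, and
           * register 31 are only reachable at PHY address 1, so the
           * address passed in is overridden before the access.
           */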
   11410 
   11411 /*
   11412  * wm_gmii_bm_writereg:	[mii interface function]
   11413  *
    11414  *	Write a PHY register on the BM PHY.
   11415  * This could be handled by the PHY layer if we didn't have to lock the
   11416  * resource ...
   11417  */
   11418 static int
   11419 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11420 {
   11421 	struct wm_softc *sc = device_private(dev);
   11422 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11423 	int rv;
   11424 
   11425 	if (sc->phy.acquire(sc)) {
   11426 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11427 		return -1;
   11428 	}
   11429 
   11430 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11431 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11432 		    || (reg == 31)) ? 1 : phy;
   11433 	/* Page 800 works differently than the rest so it has its own func */
   11434 	if (page == BM_WUC_PAGE) {
   11435 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11436 		goto release;
   11437 	}
   11438 
   11439 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11440 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11441 		    && (sc->sc_type != WM_T_82583))
   11442 			rv = wm_gmii_mdic_writereg(dev, phy,
   11443 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11444 		else
   11445 			rv = wm_gmii_mdic_writereg(dev, phy,
   11446 			    BME1000_PHY_PAGE_SELECT, page);
   11447 		if (rv != 0)
   11448 			goto release;
   11449 	}
   11450 
   11451 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11452 
   11453 release:
   11454 	sc->phy.release(sc);
   11455 	return rv;
   11456 }
   11457 
   11458 /*
   11459  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11460  *  @dev: pointer to the HW structure
   11461  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11462  *
   11463  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11464  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11465  */
   11466 static int
   11467 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11468 {
   11469 #ifdef WM_DEBUG
   11470 	struct wm_softc *sc = device_private(dev);
   11471 #endif
   11472 	uint16_t temp;
   11473 	int rv;
   11474 
   11475 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11476 		device_xname(dev), __func__));
   11477 
   11478 	if (!phy_regp)
   11479 		return -1;
   11480 
   11481 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11482 
   11483 	/* Select Port Control Registers page */
   11484 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11485 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11486 	if (rv != 0)
   11487 		return rv;
   11488 
   11489 	/* Read WUCE and save it */
   11490 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11491 	if (rv != 0)
   11492 		return rv;
   11493 
   11494 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11495 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11496 	 */
   11497 	temp = *phy_regp;
   11498 	temp |= BM_WUC_ENABLE_BIT;
   11499 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11500 
   11501 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11502 		return rv;
   11503 
   11504 	/* Select Host Wakeup Registers page - caller now able to write
   11505 	 * registers on the Wakeup registers page
   11506 	 */
   11507 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11508 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11509 }
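
          /*
           * A minimal usage sketch for the enable/disable pair (assuming
           * the PHY semaphore is already held): the pair brackets any
           * access to registers on BM_WUC_PAGE and restores the saved
           * WUCE value afterwards.  wm_wuc_example() is a hypothetical
           * name, not part of the driver.
           */
          #if 0	/* example sketch, not compiled */
          static void
          wm_wuc_example(device_t dev)
          {
          	uint16_t wuce;

          	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
          		return;
          	/* ... access wakeup registers via opcodes 0x11/0x12 ... */
          	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
          }
          #endif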
   11510 
   11511 /*
   11512  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11513  *  @dev: pointer to the HW structure
   11514  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11515  *
   11516  *  Restore BM_WUC_ENABLE_REG to its original value.
   11517  *
   11518  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11519  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11520  *  caller.
   11521  */
   11522 static int
   11523 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11524 {
   11525 #ifdef WM_DEBUG
   11526 	struct wm_softc *sc = device_private(dev);
   11527 #endif
   11528 
   11529 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11530 		device_xname(dev), __func__));
   11531 
   11532 	if (!phy_regp)
   11533 		return -1;
   11534 
   11535 	/* Select Port Control Registers page */
   11536 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11537 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11538 
   11539 	/* Restore 769.17 to its original value */
   11540 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11541 
   11542 	return 0;
   11543 }
   11544 
   11545 /*
   11546  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11547  *  @sc: pointer to the HW structure
   11548  *  @offset: register offset to be read or written
   11549  *  @val: pointer to the data to read or write
   11550  *  @rd: determines if operation is read or write
   11551  *  @page_set: BM_WUC_PAGE already set and access enabled
   11552  *
   11553  *  Read the PHY register at offset and store the retrieved information in
   11554  *  data, or write data to PHY register at offset.  Note the procedure to
   11555  *  access the PHY wakeup registers is different than reading the other PHY
   11556  *  registers. It works as such:
   11557  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11558  *  2) Set page to 800 for host (801 for the manageability firmware)
   11559  *  3) Write the address using the address opcode (0x11)
   11560  *  4) Read or write the data using the data opcode (0x12)
   11561  *  5) Restore 769.17.2 to its original value
   11562  *
   11563  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11564  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11565  *
   11566  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11567  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11568  *  is responsible for calling wm_{enable,disable}_phy_wakeup_reg_access_bm()).
   11569  */
   11570 static int
    11571 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11572 	bool page_set)
   11573 {
   11574 	struct wm_softc *sc = device_private(dev);
   11575 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11576 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11577 	uint16_t wuce;
   11578 	int rv = 0;
   11579 
   11580 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11581 		device_xname(dev), __func__));
   11582 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11583 	if ((sc->sc_type == WM_T_PCH)
   11584 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11585 		device_printf(dev,
   11586 		    "Attempting to access page %d while gig enabled.\n", page);
   11587 	}
   11588 
   11589 	if (!page_set) {
   11590 		/* Enable access to PHY wakeup registers */
   11591 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11592 		if (rv != 0) {
   11593 			device_printf(dev,
   11594 			    "%s: Could not enable PHY wakeup reg access\n",
   11595 			    __func__);
   11596 			return rv;
   11597 		}
   11598 	}
   11599 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11600 		device_xname(sc->sc_dev), __func__, page, regnum));
   11601 
   11602 	/*
    11603 	 * Steps 3) and 4): write the register address, then read or
    11604 	 * write the data (see the procedure in the comment above).
   11605 	 */
   11606 
   11607 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11608 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11609 	if (rv != 0)
   11610 		return rv;
   11611 
   11612 	if (rd) {
   11613 		/* Read the Wakeup register page value using opcode 0x12 */
   11614 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11615 	} else {
   11616 		/* Write the Wakeup register page value using opcode 0x12 */
   11617 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11618 	}
   11619 	if (rv != 0)
   11620 		return rv;
   11621 
   11622 	if (!page_set)
   11623 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11624 
   11625 	return rv;
   11626 }
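
          /*
           * For reference, how an offset decomposes through the
           * BM_PHY_REG_* macros used above: an offset encoding page 800,
           * register 1 yields BM_PHY_REG_PAGE(offset) == BM_WUC_PAGE and
           * BM_PHY_REG_NUM(offset) == 1, and the latter is what goes out
           * through the address opcode (0x11).
           */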
   11627 
   11628 /*
   11629  * wm_gmii_hv_readreg:	[mii interface function]
   11630  *
    11631  *	Read a PHY register on the HV PHY.
   11632  * This could be handled by the PHY layer if we didn't have to lock the
   11633  * resource ...
   11634  */
   11635 static int
   11636 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11637 {
   11638 	struct wm_softc *sc = device_private(dev);
   11639 	int rv;
   11640 
   11641 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11642 		device_xname(dev), __func__));
   11643 	if (sc->phy.acquire(sc)) {
   11644 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11645 		return -1;
   11646 	}
   11647 
   11648 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11649 	sc->phy.release(sc);
   11650 	return rv;
   11651 }
   11652 
   11653 static int
   11654 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11655 {
   11656 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11657 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11658 	int rv;
   11659 
   11660 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11661 
   11662 	/* Page 800 works differently than the rest so it has its own func */
   11663 	if (page == BM_WUC_PAGE)
   11664 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11665 
   11666 	/*
   11667 	 * Lower than page 768 works differently than the rest so it has its
   11668 	 * own func
   11669 	 */
   11670 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11671 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11672 		return -1;
   11673 	}
   11674 
   11675 	/*
   11676 	 * XXX I21[789] documents say that the SMBus Address register is at
   11677 	 * PHY address 01, Page 0 (not 768), Register 26.
   11678 	 */
   11679 	if (page == HV_INTC_FC_PAGE_START)
   11680 		page = 0;
   11681 
   11682 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11683 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11684 		    page << BME1000_PAGE_SHIFT);
   11685 		if (rv != 0)
   11686 			return rv;
   11687 	}
   11688 
   11689 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11690 }
   11691 
   11692 /*
   11693  * wm_gmii_hv_writereg:	[mii interface function]
   11694  *
    11695  *	Write a PHY register on the HV PHY.
   11696  * This could be handled by the PHY layer if we didn't have to lock the
   11697  * resource ...
   11698  */
   11699 static int
   11700 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11701 {
   11702 	struct wm_softc *sc = device_private(dev);
   11703 	int rv;
   11704 
   11705 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11706 		device_xname(dev), __func__));
   11707 
   11708 	if (sc->phy.acquire(sc)) {
   11709 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11710 		return -1;
   11711 	}
   11712 
   11713 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11714 	sc->phy.release(sc);
   11715 
   11716 	return rv;
   11717 }
   11718 
   11719 static int
   11720 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11721 {
   11722 	struct wm_softc *sc = device_private(dev);
   11723 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11724 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11725 	int rv;
   11726 
   11727 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11728 
   11729 	/* Page 800 works differently than the rest so it has its own func */
   11730 	if (page == BM_WUC_PAGE)
   11731 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11732 		    false);
   11733 
   11734 	/*
   11735 	 * Lower than page 768 works differently than the rest so it has its
   11736 	 * own func
   11737 	 */
   11738 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11739 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11740 		return -1;
   11741 	}
   11742 
   11743 	{
   11744 		/*
   11745 		 * XXX I21[789] documents say that the SMBus Address register
   11746 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11747 		 */
   11748 		if (page == HV_INTC_FC_PAGE_START)
   11749 			page = 0;
   11750 
   11751 		/*
   11752 		 * XXX Workaround MDIO accesses being disabled after entering
   11753 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11754 		 * register is set)
   11755 		 */
   11756 		if (sc->sc_phytype == WMPHY_82578) {
   11757 			struct mii_softc *child;
   11758 
   11759 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11760 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11761 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11762 			    && ((val & (1 << 11)) != 0)) {
   11763 				device_printf(dev, "XXX need workaround\n");
   11764 			}
   11765 		}
   11766 
   11767 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11768 			rv = wm_gmii_mdic_writereg(dev, 1,
   11769 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11770 			if (rv != 0)
   11771 				return rv;
   11772 		}
   11773 	}
   11774 
   11775 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11776 }
   11777 
   11778 /*
   11779  * wm_gmii_82580_readreg:	[mii interface function]
   11780  *
   11781  *	Read a PHY register on the 82580 and I350.
   11782  * This could be handled by the PHY layer if we didn't have to lock the
   11783  * resource ...
   11784  */
   11785 static int
   11786 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11787 {
   11788 	struct wm_softc *sc = device_private(dev);
   11789 	int rv;
   11790 
   11791 	if (sc->phy.acquire(sc) != 0) {
   11792 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11793 		return -1;
   11794 	}
   11795 
   11796 #ifdef DIAGNOSTIC
   11797 	if (reg > MII_ADDRMASK) {
   11798 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11799 		    __func__, sc->sc_phytype, reg);
   11800 		reg &= MII_ADDRMASK;
   11801 	}
   11802 #endif
   11803 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11804 
   11805 	sc->phy.release(sc);
   11806 	return rv;
   11807 }
   11808 
   11809 /*
   11810  * wm_gmii_82580_writereg:	[mii interface function]
   11811  *
   11812  *	Write a PHY register on the 82580 and I350.
   11813  * This could be handled by the PHY layer if we didn't have to lock the
   11814  * resource ...
   11815  */
   11816 static int
   11817 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11818 {
   11819 	struct wm_softc *sc = device_private(dev);
   11820 	int rv;
   11821 
   11822 	if (sc->phy.acquire(sc) != 0) {
   11823 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11824 		return -1;
   11825 	}
   11826 
   11827 #ifdef DIAGNOSTIC
   11828 	if (reg > MII_ADDRMASK) {
   11829 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11830 		    __func__, sc->sc_phytype, reg);
   11831 		reg &= MII_ADDRMASK;
   11832 	}
   11833 #endif
   11834 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11835 
   11836 	sc->phy.release(sc);
   11837 	return rv;
   11838 }
   11839 
   11840 /*
   11841  * wm_gmii_gs40g_readreg:	[mii interface function]
   11842  *
    11843  *	Read a PHY register on the I210 and I211.
   11844  * This could be handled by the PHY layer if we didn't have to lock the
   11845  * resource ...
   11846  */
   11847 static int
   11848 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11849 {
   11850 	struct wm_softc *sc = device_private(dev);
   11851 	int page, offset;
   11852 	int rv;
   11853 
   11854 	/* Acquire semaphore */
   11855 	if (sc->phy.acquire(sc)) {
   11856 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11857 		return -1;
   11858 	}
   11859 
   11860 	/* Page select */
   11861 	page = reg >> GS40G_PAGE_SHIFT;
   11862 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11863 	if (rv != 0)
   11864 		goto release;
   11865 
   11866 	/* Read reg */
   11867 	offset = reg & GS40G_OFFSET_MASK;
   11868 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11869 
   11870 release:
   11871 	sc->phy.release(sc);
   11872 	return rv;
   11873 }
   11874 
   11875 /*
   11876  * wm_gmii_gs40g_writereg:	[mii interface function]
   11877  *
   11878  *	Write a PHY register on the I210 and I211.
   11879  * This could be handled by the PHY layer if we didn't have to lock the
   11880  * resource ...
   11881  */
   11882 static int
   11883 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11884 {
   11885 	struct wm_softc *sc = device_private(dev);
   11886 	uint16_t page;
   11887 	int offset, rv;
   11888 
   11889 	/* Acquire semaphore */
   11890 	if (sc->phy.acquire(sc)) {
   11891 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11892 		return -1;
   11893 	}
   11894 
   11895 	/* Page select */
   11896 	page = reg >> GS40G_PAGE_SHIFT;
   11897 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11898 	if (rv != 0)
   11899 		goto release;
   11900 
   11901 	/* Write reg */
   11902 	offset = reg & GS40G_OFFSET_MASK;
   11903 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11904 
   11905 release:
   11906 	/* Release semaphore */
   11907 	sc->phy.release(sc);
   11908 	return rv;
   11909 }
   11910 
   11911 /*
   11912  * wm_gmii_statchg:	[mii interface function]
   11913  *
   11914  *	Callback from MII layer when media changes.
   11915  */
   11916 static void
   11917 wm_gmii_statchg(struct ifnet *ifp)
   11918 {
   11919 	struct wm_softc *sc = ifp->if_softc;
   11920 	struct mii_data *mii = &sc->sc_mii;
   11921 
   11922 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11923 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11924 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11925 
   11926 	/* Get flow control negotiation result. */
   11927 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11928 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11929 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11930 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11931 	}
   11932 
   11933 	if (sc->sc_flowflags & IFM_FLOW) {
   11934 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11935 			sc->sc_ctrl |= CTRL_TFCE;
   11936 			sc->sc_fcrtl |= FCRTL_XONE;
   11937 		}
   11938 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11939 			sc->sc_ctrl |= CTRL_RFCE;
   11940 	}
   11941 
   11942 	if (mii->mii_media_active & IFM_FDX) {
   11943 		DPRINTF(sc, WM_DEBUG_LINK,
   11944 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11945 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11946 	} else {
   11947 		DPRINTF(sc, WM_DEBUG_LINK,
   11948 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11949 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11950 	}
   11951 
   11952 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11953 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11954 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11955 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11956 	if (sc->sc_type == WM_T_80003) {
   11957 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11958 		case IFM_1000_T:
   11959 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11960 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11961 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11962 			break;
   11963 		default:
   11964 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11965 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11966 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11967 			break;
   11968 		}
   11969 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11970 	}
   11971 }
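
          /*
           * A worked example of the flow-control handling above: if
           * autonegotiation resolved to symmetric PAUSE (IFM_FLOW with
           * both IFM_ETH_TXPAUSE and IFM_ETH_RXPAUSE), CTRL_TFCE and
           * CTRL_RFCE both end up set and FCRTL_XONE enables XON frame
           * generation; an RX-only PAUSE result sets just CTRL_RFCE.
           */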
   11972 
    11973 /* Kumeran related (80003, ICH* and PCH*) */
   11974 
   11975 /*
   11976  * wm_kmrn_readreg:
   11977  *
    11978  *	Read a Kumeran register.
   11979  */
   11980 static int
   11981 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11982 {
   11983 	int rv;
   11984 
   11985 	if (sc->sc_type == WM_T_80003)
   11986 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11987 	else
   11988 		rv = sc->phy.acquire(sc);
   11989 	if (rv != 0) {
   11990 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11991 		    __func__);
   11992 		return rv;
   11993 	}
   11994 
   11995 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11996 
   11997 	if (sc->sc_type == WM_T_80003)
   11998 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11999 	else
   12000 		sc->phy.release(sc);
   12001 
   12002 	return rv;
   12003 }
   12004 
   12005 static int
   12006 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12007 {
   12008 
   12009 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12010 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12011 	    KUMCTRLSTA_REN);
   12012 	CSR_WRITE_FLUSH(sc);
   12013 	delay(2);
   12014 
   12015 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12016 
   12017 	return 0;
   12018 }
   12019 
   12020 /*
   12021  * wm_kmrn_writereg:
   12022  *
    12023  *	Write a Kumeran register.
   12024  */
   12025 static int
   12026 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12027 {
   12028 	int rv;
   12029 
   12030 	if (sc->sc_type == WM_T_80003)
   12031 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12032 	else
   12033 		rv = sc->phy.acquire(sc);
   12034 	if (rv != 0) {
   12035 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12036 		    __func__);
   12037 		return rv;
   12038 	}
   12039 
   12040 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12041 
   12042 	if (sc->sc_type == WM_T_80003)
   12043 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12044 	else
   12045 		sc->phy.release(sc);
   12046 
   12047 	return rv;
   12048 }
   12049 
   12050 static int
   12051 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12052 {
   12053 
   12054 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12055 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12056 
   12057 	return 0;
   12058 }
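
          /*
           * For reference, both Kumeran accessors above multiplex through
           * the single KUMCTRLSTA register: the register offset goes in
           * the KUMCTRLSTA_OFFSET field, KUMCTRLSTA_REN requests a read,
           * and after a short delay the low 16 bits (KUMCTRLSTA_MASK)
           * carry the data.  A write places the data in the low 16 bits
           * instead of setting KUMCTRLSTA_REN.
           */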
   12059 
   12060 /*
   12061  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12062  * This access method is different from IEEE MMD.
   12063  */
   12064 static int
   12065 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12066 {
   12067 	struct wm_softc *sc = device_private(dev);
   12068 	int rv;
   12069 
   12070 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12071 	if (rv != 0)
   12072 		return rv;
   12073 
   12074 	if (rd)
   12075 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12076 	else
   12077 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12078 	return rv;
   12079 }
   12080 
   12081 static int
   12082 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12083 {
   12084 
   12085 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12086 }
   12087 
   12088 static int
   12089 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12090 {
   12091 
   12092 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12093 }
   12094 
   12095 /* SGMII related */
   12096 
   12097 /*
   12098  * wm_sgmii_uses_mdio
   12099  *
   12100  * Check whether the transaction is to the internal PHY or the external
   12101  * MDIO interface. Return true if it's MDIO.
   12102  */
   12103 static bool
   12104 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12105 {
   12106 	uint32_t reg;
   12107 	bool ismdio = false;
   12108 
   12109 	switch (sc->sc_type) {
   12110 	case WM_T_82575:
   12111 	case WM_T_82576:
   12112 		reg = CSR_READ(sc, WMREG_MDIC);
   12113 		ismdio = ((reg & MDIC_DEST) != 0);
   12114 		break;
   12115 	case WM_T_82580:
   12116 	case WM_T_I350:
   12117 	case WM_T_I354:
   12118 	case WM_T_I210:
   12119 	case WM_T_I211:
   12120 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12121 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12122 		break;
   12123 	default:
   12124 		break;
   12125 	}
   12126 
   12127 	return ismdio;
   12128 }
   12129 
   12130 /* Setup internal SGMII PHY for SFP */
   12131 static void
   12132 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12133 {
   12134 	uint16_t id1, id2, phyreg;
   12135 	int i, rv;
   12136 
   12137 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12138 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12139 		return;
   12140 
   12141 	for (i = 0; i < MII_NPHY; i++) {
   12142 		sc->phy.no_errprint = true;
   12143 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12144 		if (rv != 0)
   12145 			continue;
   12146 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12147 		if (rv != 0)
   12148 			continue;
   12149 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12150 			continue;
   12151 		sc->phy.no_errprint = false;
   12152 
   12153 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12154 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12155 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12156 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12157 		break;
   12158 	}
   12159 
   12160 }
   12161 
   12162 /*
   12163  * wm_sgmii_readreg:	[mii interface function]
   12164  *
   12165  *	Read a PHY register on the SGMII
   12166  * This could be handled by the PHY layer if we didn't have to lock the
   12167  * resource ...
   12168  */
   12169 static int
   12170 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12171 {
   12172 	struct wm_softc *sc = device_private(dev);
   12173 	int rv;
   12174 
   12175 	if (sc->phy.acquire(sc)) {
   12176 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12177 		return -1;
   12178 	}
   12179 
   12180 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12181 
   12182 	sc->phy.release(sc);
   12183 	return rv;
   12184 }
   12185 
   12186 static int
   12187 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12188 {
   12189 	struct wm_softc *sc = device_private(dev);
   12190 	uint32_t i2ccmd;
   12191 	int i, rv = 0;
   12192 
   12193 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12194 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12195 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12196 
   12197 	/* Poll the ready bit */
   12198 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12199 		delay(50);
   12200 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12201 		if (i2ccmd & I2CCMD_READY)
   12202 			break;
   12203 	}
   12204 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12205 		device_printf(dev, "I2CCMD Read did not complete\n");
   12206 		rv = ETIMEDOUT;
   12207 	}
   12208 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12209 		if (!sc->phy.no_errprint)
   12210 			device_printf(dev, "I2CCMD Error bit set\n");
   12211 		rv = EIO;
   12212 	}
   12213 
   12214 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12215 
   12216 	return rv;
   12217 }
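
          /*
           * A worked example of the byte swap above: if the low 16 bits
           * of I2CCMD read back as 0xaabb, the value delivered to the MII
           * layer is 0xbbaa.  This mirrors the explicit swap performed
           * before writes in wm_sgmii_writereg_locked() below.
           */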
   12218 
   12219 /*
   12220  * wm_sgmii_writereg:	[mii interface function]
   12221  *
   12222  *	Write a PHY register on the SGMII.
   12223  * This could be handled by the PHY layer if we didn't have to lock the
   12224  * resource ...
   12225  */
   12226 static int
   12227 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12228 {
   12229 	struct wm_softc *sc = device_private(dev);
   12230 	int rv;
   12231 
   12232 	if (sc->phy.acquire(sc) != 0) {
   12233 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12234 		return -1;
   12235 	}
   12236 
   12237 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12238 
   12239 	sc->phy.release(sc);
   12240 
   12241 	return rv;
   12242 }
   12243 
   12244 static int
   12245 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12246 {
   12247 	struct wm_softc *sc = device_private(dev);
   12248 	uint32_t i2ccmd;
   12249 	uint16_t swapdata;
   12250 	int rv = 0;
   12251 	int i;
   12252 
   12253 	/* Swap the data bytes for the I2C interface */
   12254 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12255 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12256 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12257 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12258 
   12259 	/* Poll the ready bit */
   12260 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12261 		delay(50);
   12262 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12263 		if (i2ccmd & I2CCMD_READY)
   12264 			break;
   12265 	}
   12266 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12267 		device_printf(dev, "I2CCMD Write did not complete\n");
   12268 		rv = ETIMEDOUT;
   12269 	}
   12270 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12271 		device_printf(dev, "I2CCMD Error bit set\n");
   12272 		rv = EIO;
   12273 	}
   12274 
   12275 	return rv;
   12276 }
   12277 
   12278 /* TBI related */
   12279 
   12280 static bool
   12281 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12282 {
   12283 	bool sig;
   12284 
   12285 	sig = ctrl & CTRL_SWDPIN(1);
   12286 
   12287 	/*
   12288 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12289 	 * detect a signal, 1 if they don't.
   12290 	 */
   12291 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12292 		sig = !sig;
   12293 
   12294 	return sig;
   12295 }
   12296 
   12297 /*
   12298  * wm_tbi_mediainit:
   12299  *
   12300  *	Initialize media for use on 1000BASE-X devices.
   12301  */
   12302 static void
   12303 wm_tbi_mediainit(struct wm_softc *sc)
   12304 {
   12305 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12306 	const char *sep = "";
   12307 
   12308 	if (sc->sc_type < WM_T_82543)
   12309 		sc->sc_tipg = TIPG_WM_DFLT;
   12310 	else
   12311 		sc->sc_tipg = TIPG_LG_DFLT;
   12312 
   12313 	sc->sc_tbi_serdes_anegticks = 5;
   12314 
   12315 	/* Initialize our media structures */
   12316 	sc->sc_mii.mii_ifp = ifp;
   12317 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12318 
   12319 	ifp->if_baudrate = IF_Gbps(1);
   12320 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12321 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12322 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12323 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12324 		    sc->sc_core_lock);
   12325 	} else {
   12326 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12327 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12328 	}
   12329 
   12330 	/*
   12331 	 * SWD Pins:
   12332 	 *
   12333 	 *	0 = Link LED (output)
   12334 	 *	1 = Loss Of Signal (input)
   12335 	 */
   12336 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12337 
   12338 	/* XXX Perhaps this is only for TBI */
   12339 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12340 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12341 
   12342 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12343 		sc->sc_ctrl &= ~CTRL_LRST;
   12344 
   12345 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12346 
   12347 #define	ADD(ss, mm, dd)							\
   12348 do {									\
   12349 	aprint_normal("%s%s", sep, ss);					\
   12350 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12351 	sep = ", ";							\
   12352 } while (/*CONSTCOND*/0)
   12353 
   12354 	aprint_normal_dev(sc->sc_dev, "");
   12355 
   12356 	if (sc->sc_type == WM_T_I354) {
   12357 		uint32_t status;
   12358 
   12359 		status = CSR_READ(sc, WMREG_STATUS);
   12360 		if (((status & STATUS_2P5_SKU) != 0)
   12361 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    12362 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    12363 		} else
    12364 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12365 	} else if (sc->sc_type == WM_T_82545) {
   12366 		/* Only 82545 is LX (XXX except SFP) */
   12367 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12368 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12369 	} else if (sc->sc_sfptype != 0) {
   12370 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12371 		switch (sc->sc_sfptype) {
   12372 		default:
   12373 		case SFF_SFP_ETH_FLAGS_1000SX:
   12374 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12375 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12376 			break;
   12377 		case SFF_SFP_ETH_FLAGS_1000LX:
   12378 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12379 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12380 			break;
   12381 		case SFF_SFP_ETH_FLAGS_1000CX:
   12382 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12383 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12384 			break;
   12385 		case SFF_SFP_ETH_FLAGS_1000T:
   12386 			ADD("1000baseT", IFM_1000_T, 0);
   12387 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12388 			break;
   12389 		case SFF_SFP_ETH_FLAGS_100FX:
   12390 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12391 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12392 			break;
   12393 		}
   12394 	} else {
   12395 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12396 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12397 	}
   12398 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12399 	aprint_normal("\n");
   12400 
   12401 #undef ADD
   12402 
   12403 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12404 }
   12405 
   12406 /*
   12407  * wm_tbi_mediachange:	[ifmedia interface function]
   12408  *
   12409  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12410  */
   12411 static int
   12412 wm_tbi_mediachange(struct ifnet *ifp)
   12413 {
   12414 	struct wm_softc *sc = ifp->if_softc;
   12415 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12416 	uint32_t status, ctrl;
   12417 	bool signal;
   12418 	int i;
   12419 
   12420 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12421 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12422 		/* XXX need some work for >= 82571 and < 82575 */
   12423 		if (sc->sc_type < WM_T_82575)
   12424 			return 0;
   12425 	}
   12426 
   12427 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12428 	    || (sc->sc_type >= WM_T_82575))
   12429 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12430 
   12431 	sc->sc_ctrl &= ~CTRL_LRST;
   12432 	sc->sc_txcw = TXCW_ANE;
   12433 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12434 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12435 	else if (ife->ifm_media & IFM_FDX)
   12436 		sc->sc_txcw |= TXCW_FD;
   12437 	else
   12438 		sc->sc_txcw |= TXCW_HD;
   12439 
   12440 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12441 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12442 
    12443 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12444 		device_xname(sc->sc_dev), sc->sc_txcw));
   12445 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12446 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12447 	CSR_WRITE_FLUSH(sc);
   12448 	delay(1000);
   12449 
   12450 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12451 	signal = wm_tbi_havesignal(sc, ctrl);
   12452 
    12453 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
    12454 		device_xname(sc->sc_dev), signal));
   12455 
   12456 	if (signal) {
   12457 		/* Have signal; wait for the link to come up. */
   12458 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12459 			delay(10000);
   12460 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12461 				break;
   12462 		}
   12463 
    12464 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12465 			device_xname(sc->sc_dev), i));
   12466 
   12467 		status = CSR_READ(sc, WMREG_STATUS);
   12468 		DPRINTF(sc, WM_DEBUG_LINK,
   12469 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12470 			device_xname(sc->sc_dev), status, STATUS_LU));
   12471 		if (status & STATUS_LU) {
   12472 			/* Link is up. */
   12473 			DPRINTF(sc, WM_DEBUG_LINK,
   12474 			    ("%s: LINK: set media -> link up %s\n",
   12475 				device_xname(sc->sc_dev),
   12476 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12477 
   12478 			/*
    12479 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    12480 			 * automatically, so refresh sc->sc_ctrl from it here.
   12481 			 */
   12482 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12483 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12484 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12485 			if (status & STATUS_FD)
   12486 				sc->sc_tctl |=
   12487 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12488 			else
   12489 				sc->sc_tctl |=
   12490 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12491 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12492 				sc->sc_fcrtl |= FCRTL_XONE;
   12493 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12494 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12495 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12496 			sc->sc_tbi_linkup = 1;
   12497 		} else {
   12498 			if (i == WM_LINKUP_TIMEOUT)
   12499 				wm_check_for_link(sc);
   12500 			/* Link is down. */
   12501 			DPRINTF(sc, WM_DEBUG_LINK,
   12502 			    ("%s: LINK: set media -> link down\n",
   12503 				device_xname(sc->sc_dev)));
   12504 			sc->sc_tbi_linkup = 0;
   12505 		}
   12506 	} else {
   12507 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12508 			device_xname(sc->sc_dev)));
   12509 		sc->sc_tbi_linkup = 0;
   12510 	}
   12511 
   12512 	wm_tbi_serdes_set_linkled(sc);
   12513 
   12514 	return 0;
   12515 }
   12516 
   12517 /*
   12518  * wm_tbi_mediastatus:	[ifmedia interface function]
   12519  *
   12520  *	Get the current interface media status on a 1000BASE-X device.
   12521  */
   12522 static void
   12523 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12524 {
   12525 	struct wm_softc *sc = ifp->if_softc;
   12526 	uint32_t ctrl, status;
   12527 
   12528 	ifmr->ifm_status = IFM_AVALID;
   12529 	ifmr->ifm_active = IFM_ETHER;
   12530 
   12531 	status = CSR_READ(sc, WMREG_STATUS);
   12532 	if ((status & STATUS_LU) == 0) {
   12533 		ifmr->ifm_active |= IFM_NONE;
   12534 		return;
   12535 	}
   12536 
   12537 	ifmr->ifm_status |= IFM_ACTIVE;
   12538 	/* Only 82545 is LX */
   12539 	if (sc->sc_type == WM_T_82545)
   12540 		ifmr->ifm_active |= IFM_1000_LX;
   12541 	else
   12542 		ifmr->ifm_active |= IFM_1000_SX;
   12543 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12544 		ifmr->ifm_active |= IFM_FDX;
   12545 	else
   12546 		ifmr->ifm_active |= IFM_HDX;
   12547 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12548 	if (ctrl & CTRL_RFCE)
   12549 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12550 	if (ctrl & CTRL_TFCE)
   12551 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12552 }
   12553 
   12554 /* XXX TBI only */
   12555 static int
   12556 wm_check_for_link(struct wm_softc *sc)
   12557 {
   12558 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12559 	uint32_t rxcw;
   12560 	uint32_t ctrl;
   12561 	uint32_t status;
   12562 	bool signal;
   12563 
   12564 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12565 		device_xname(sc->sc_dev), __func__));
   12566 
   12567 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12568 		/* XXX need some work for >= 82571 */
   12569 		if (sc->sc_type >= WM_T_82571) {
   12570 			sc->sc_tbi_linkup = 1;
   12571 			return 0;
   12572 		}
   12573 	}
   12574 
   12575 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12576 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12577 	status = CSR_READ(sc, WMREG_STATUS);
   12578 	signal = wm_tbi_havesignal(sc, ctrl);
   12579 
   12580 	DPRINTF(sc, WM_DEBUG_LINK,
   12581 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12582 		device_xname(sc->sc_dev), __func__, signal,
   12583 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12584 
   12585 	/*
   12586 	 * SWDPIN   LU RXCW
   12587 	 *	0    0	  0
   12588 	 *	0    0	  1	(should not happen)
   12589 	 *	0    1	  0	(should not happen)
   12590 	 *	0    1	  1	(should not happen)
   12591 	 *	1    0	  0	Disable autonego and force linkup
   12592 	 *	1    0	  1	got /C/ but not linkup yet
   12593 	 *	1    1	  0	(linkup)
   12594 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12595 	 *
   12596 	 */
   12597 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12598 		DPRINTF(sc, WM_DEBUG_LINK,
   12599 		    ("%s: %s: force linkup and fullduplex\n",
   12600 			device_xname(sc->sc_dev), __func__));
   12601 		sc->sc_tbi_linkup = 0;
   12602 		/* Disable auto-negotiation in the TXCW register */
   12603 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12604 
   12605 		/*
   12606 		 * Force link-up and also force full-duplex.
   12607 		 *
    12608 		 * NOTE: the hardware may have updated TFCE and RFCE in
    12609 		 * CTRL automatically, so refresh sc->sc_ctrl from it here.
   12610 		 */
   12611 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12612 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12613 	} else if (((status & STATUS_LU) != 0)
   12614 	    && ((rxcw & RXCW_C) != 0)
   12615 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12616 		sc->sc_tbi_linkup = 1;
   12617 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12618 			device_xname(sc->sc_dev),
   12619 			__func__));
   12620 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12621 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12622 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12623 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12624 			device_xname(sc->sc_dev), __func__));
   12625 	} else {
   12626 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12627 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12628 			status));
   12629 	}
   12630 
   12631 	return 0;
   12632 }
   12633 
   12634 /*
   12635  * wm_tbi_tick:
   12636  *
   12637  *	Check the link on TBI devices.
   12638  *	This function acts as mii_tick().
   12639  */
   12640 static void
   12641 wm_tbi_tick(struct wm_softc *sc)
   12642 {
   12643 	struct mii_data *mii = &sc->sc_mii;
   12644 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12645 	uint32_t status;
   12646 
   12647 	KASSERT(WM_CORE_LOCKED(sc));
   12648 
   12649 	status = CSR_READ(sc, WMREG_STATUS);
   12650 
   12651 	/* XXX is this needed? */
   12652 	(void)CSR_READ(sc, WMREG_RXCW);
   12653 	(void)CSR_READ(sc, WMREG_CTRL);
   12654 
   12655 	/* set link status */
   12656 	if ((status & STATUS_LU) == 0) {
   12657 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12658 			device_xname(sc->sc_dev)));
   12659 		sc->sc_tbi_linkup = 0;
   12660 	} else if (sc->sc_tbi_linkup == 0) {
   12661 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12662 			device_xname(sc->sc_dev),
   12663 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12664 		sc->sc_tbi_linkup = 1;
   12665 		sc->sc_tbi_serdes_ticks = 0;
   12666 	}
   12667 
   12668 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12669 		goto setled;
   12670 
   12671 	if ((status & STATUS_LU) == 0) {
   12672 		sc->sc_tbi_linkup = 0;
   12673 		/* If the timer expired, retry autonegotiation */
   12674 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12675 		    && (++sc->sc_tbi_serdes_ticks
   12676 			>= sc->sc_tbi_serdes_anegticks)) {
   12677 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12678 				device_xname(sc->sc_dev), __func__));
   12679 			sc->sc_tbi_serdes_ticks = 0;
   12680 			/*
   12681 			 * Reset the link, and let autonegotiation do
   12682 			 * its thing
   12683 			 */
   12684 			sc->sc_ctrl |= CTRL_LRST;
   12685 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12686 			CSR_WRITE_FLUSH(sc);
   12687 			delay(1000);
   12688 			sc->sc_ctrl &= ~CTRL_LRST;
   12689 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12690 			CSR_WRITE_FLUSH(sc);
   12691 			delay(1000);
   12692 			CSR_WRITE(sc, WMREG_TXCW,
   12693 			    sc->sc_txcw & ~TXCW_ANE);
   12694 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12695 		}
   12696 	}
   12697 
   12698 setled:
   12699 	wm_tbi_serdes_set_linkled(sc);
   12700 }
   12701 
   12702 /* SERDES related */
   12703 static void
   12704 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12705 {
   12706 	uint32_t reg;
   12707 
   12708 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12709 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12710 		return;
   12711 
   12712 	/* Enable PCS to turn on link */
   12713 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12714 	reg |= PCS_CFG_PCS_EN;
   12715 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12716 
   12717 	/* Power up the laser */
   12718 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12719 	reg &= ~CTRL_EXT_SWDPIN(3);
   12720 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12721 
   12722 	/* Flush the write to verify completion */
   12723 	CSR_WRITE_FLUSH(sc);
   12724 	delay(1000);
   12725 }
   12726 
   12727 static int
   12728 wm_serdes_mediachange(struct ifnet *ifp)
   12729 {
   12730 	struct wm_softc *sc = ifp->if_softc;
   12731 	bool pcs_autoneg = true; /* XXX */
   12732 	uint32_t ctrl_ext, pcs_lctl, reg;
   12733 
   12734 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12735 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12736 		return 0;
   12737 
   12738 	/* XXX Currently, this function is not called on 8257[12] */
   12739 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12740 	    || (sc->sc_type >= WM_T_82575))
   12741 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12742 
   12743 	/* Power on the sfp cage if present */
   12744 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12745 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12746 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12747 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12748 
   12749 	sc->sc_ctrl |= CTRL_SLU;
   12750 
   12751 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12752 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12753 
   12754 		reg = CSR_READ(sc, WMREG_CONNSW);
   12755 		reg |= CONNSW_ENRGSRC;
   12756 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12757 	}
   12758 
   12759 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12760 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12761 	case CTRL_EXT_LINK_MODE_SGMII:
		/* SGMII mode lets the PHY handle forcing speed/duplex */
   12763 		pcs_autoneg = true;
		/* The autoneg timeout should be disabled for SGMII mode */
   12765 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12766 		break;
   12767 	case CTRL_EXT_LINK_MODE_1000KX:
   12768 		pcs_autoneg = false;
   12769 		/* FALLTHROUGH */
   12770 	default:
   12771 		if ((sc->sc_type == WM_T_82575)
   12772 		    || (sc->sc_type == WM_T_82576)) {
   12773 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12774 				pcs_autoneg = false;
   12775 		}
   12776 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12777 		    | CTRL_FRCFDX;
   12778 
   12779 		/* Set speed of 1000/Full if speed/duplex is forced */
   12780 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12781 	}
   12782 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12783 
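	/*
	 * Start from a clean slate: clear the autoneg enable and force
	 * bits, then set below only what the chosen mode needs.
	 */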
   12784 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12785 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12786 
   12787 	if (pcs_autoneg) {
   12788 		/* Set PCS register for autoneg */
   12789 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12790 
   12791 		/* Disable force flow control for autoneg */
   12792 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12793 
   12794 		/* Configure flow control advertisement for autoneg */
   12795 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12796 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12797 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12798 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12799 	} else
   12800 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12801 
   12802 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12803 
   12804 	return 0;
   12805 }
   12806 
   12807 static void
   12808 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12809 {
   12810 	struct wm_softc *sc = ifp->if_softc;
   12811 	struct mii_data *mii = &sc->sc_mii;
   12812 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12813 	uint32_t pcs_adv, pcs_lpab, reg;
   12814 
   12815 	ifmr->ifm_status = IFM_AVALID;
   12816 	ifmr->ifm_active = IFM_ETHER;
   12817 
   12818 	/* Check PCS */
   12819 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12820 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12821 		ifmr->ifm_active |= IFM_NONE;
   12822 		sc->sc_tbi_linkup = 0;
   12823 		goto setled;
   12824 	}
   12825 
   12826 	sc->sc_tbi_linkup = 1;
   12827 	ifmr->ifm_status |= IFM_ACTIVE;
   12828 	if (sc->sc_type == WM_T_I354) {
   12829 		uint32_t status;
   12830 
   12831 		status = CSR_READ(sc, WMREG_STATUS);
   12832 		if (((status & STATUS_2P5_SKU) != 0)
   12833 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12834 			ifmr->ifm_active |= IFM_2500_KX;
   12835 		} else
   12836 			ifmr->ifm_active |= IFM_1000_KX;
   12837 	} else {
   12838 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12839 		case PCS_LSTS_SPEED_10:
   12840 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12841 			break;
   12842 		case PCS_LSTS_SPEED_100:
   12843 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12844 			break;
   12845 		case PCS_LSTS_SPEED_1000:
   12846 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12847 			break;
   12848 		default:
   12849 			device_printf(sc->sc_dev, "Unknown speed\n");
   12850 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12851 			break;
   12852 		}
   12853 	}
   12854 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12855 	if ((reg & PCS_LSTS_FDX) != 0)
   12856 		ifmr->ifm_active |= IFM_FDX;
   12857 	else
   12858 		ifmr->ifm_active |= IFM_HDX;
   12859 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12860 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12861 		/* Check flow */
   12862 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12863 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("XXX LINKOK but not ACOMP\n"));
   12865 			goto setled;
   12866 		}
   12867 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12868 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12869 		DPRINTF(sc, WM_DEBUG_LINK,
   12870 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12871 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12872 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12873 			mii->mii_media_active |= IFM_FLOW
   12874 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12875 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12876 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12877 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12878 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12879 			mii->mii_media_active |= IFM_FLOW
   12880 			    | IFM_ETH_TXPAUSE;
   12881 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12882 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12883 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12884 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12885 			mii->mii_media_active |= IFM_FLOW
   12886 			    | IFM_ETH_RXPAUSE;
   12887 		}
   12888 	}
   12889 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12890 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12891 setled:
   12892 	wm_tbi_serdes_set_linkled(sc);
   12893 }
   12894 
   12895 /*
   12896  * wm_serdes_tick:
   12897  *
   12898  *	Check the link on serdes devices.
   12899  */
   12900 static void
   12901 wm_serdes_tick(struct wm_softc *sc)
   12902 {
   12903 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12904 	struct mii_data *mii = &sc->sc_mii;
   12905 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12906 	uint32_t reg;
   12907 
   12908 	KASSERT(WM_CORE_LOCKED(sc));
   12909 
   12910 	mii->mii_media_status = IFM_AVALID;
   12911 	mii->mii_media_active = IFM_ETHER;
   12912 
   12913 	/* Check PCS */
   12914 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12915 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12916 		mii->mii_media_status |= IFM_ACTIVE;
   12917 		sc->sc_tbi_linkup = 1;
   12918 		sc->sc_tbi_serdes_ticks = 0;
   12919 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12920 		if ((reg & PCS_LSTS_FDX) != 0)
   12921 			mii->mii_media_active |= IFM_FDX;
   12922 		else
   12923 			mii->mii_media_active |= IFM_HDX;
   12924 	} else {
   12925 		mii->mii_media_status |= IFM_NONE;
   12926 		sc->sc_tbi_linkup = 0;
   12927 		/* If the timer expired, retry autonegotiation */
   12928 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12929 		    && (++sc->sc_tbi_serdes_ticks
   12930 			>= sc->sc_tbi_serdes_anegticks)) {
   12931 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12932 				device_xname(sc->sc_dev), __func__));
   12933 			sc->sc_tbi_serdes_ticks = 0;
   12934 			/* XXX */
   12935 			wm_serdes_mediachange(ifp);
   12936 		}
   12937 	}
   12938 
   12939 	wm_tbi_serdes_set_linkled(sc);
   12940 }
   12941 
   12942 /* SFP related */
   12943 
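/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM through the I2CCMD
 *	register: start a read at the given offset, then poll for READY
 *	(up to I2CCMD_PHY_TIMEOUT * 50us) and check for ERROR.
 */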
   12944 static int
   12945 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12946 {
   12947 	uint32_t i2ccmd;
   12948 	int i;
   12949 
   12950 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12951 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12952 
   12953 	/* Poll the ready bit */
   12954 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12955 		delay(50);
   12956 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12957 		if (i2ccmd & I2CCMD_READY)
   12958 			break;
   12959 	}
   12960 	if ((i2ccmd & I2CCMD_READY) == 0)
   12961 		return -1;
   12962 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12963 		return -1;
   12964 
   12965 	*data = i2ccmd & 0x00ff;
   12966 
   12967 	return 0;
   12968 }
   12969 
   12970 static uint32_t
   12971 wm_sfp_get_media_type(struct wm_softc *sc)
   12972 {
   12973 	uint32_t ctrl_ext;
   12974 	uint8_t val = 0;
   12975 	int timeout = 3;
   12976 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12977 	int rv = -1;
   12978 
   12979 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12980 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12981 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12982 	CSR_WRITE_FLUSH(sc);
   12983 
   12984 	/* Read SFP module data */
   12985 	while (timeout) {
   12986 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12987 		if (rv == 0)
   12988 			break;
   12989 		delay(100*1000); /* XXX too big */
   12990 		timeout--;
   12991 	}
   12992 	if (rv != 0)
   12993 		goto out;
   12994 
   12995 	switch (val) {
   12996 	case SFF_SFP_ID_SFF:
   12997 		aprint_normal_dev(sc->sc_dev,
   12998 		    "Module/Connector soldered to board\n");
   12999 		break;
   13000 	case SFF_SFP_ID_SFP:
   13001 		sc->sc_flags |= WM_F_SFP;
   13002 		break;
   13003 	case SFF_SFP_ID_UNKNOWN:
   13004 		goto out;
   13005 	default:
   13006 		break;
   13007 	}
   13008 
   13009 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13010 	if (rv != 0)
   13011 		goto out;
   13012 
   13013 	sc->sc_sfptype = val;
   13014 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13015 		mediatype = WM_MEDIATYPE_SERDES;
   13016 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13017 		sc->sc_flags |= WM_F_SGMII;
   13018 		mediatype = WM_MEDIATYPE_COPPER;
   13019 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13020 		sc->sc_flags |= WM_F_SGMII;
   13021 		mediatype = WM_MEDIATYPE_SERDES;
   13022 	} else {
   13023 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13024 		    __func__, sc->sc_sfptype);
   13025 		sc->sc_sfptype = 0; /* XXX unknown */
   13026 	}
   13027 
   13028 out:
   13029 	/* Restore I2C interface setting */
   13030 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13031 
   13032 	return mediatype;
   13033 }
   13034 
   13035 /*
   13036  * NVM related.
   13037  * Microwire, SPI (w/wo EERD) and Flash.
   13038  */
   13039 
/* Both SPI and Microwire */
   13041 
   13042 /*
   13043  * wm_eeprom_sendbits:
   13044  *
   13045  *	Send a series of bits to the EEPROM.
   13046  */
   13047 static void
   13048 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13049 {
   13050 	uint32_t reg;
   13051 	int x;
   13052 
   13053 	reg = CSR_READ(sc, WMREG_EECD);
   13054 
   13055 	for (x = nbits; x > 0; x--) {
   13056 		if (bits & (1U << (x - 1)))
   13057 			reg |= EECD_DI;
   13058 		else
   13059 			reg &= ~EECD_DI;
   13060 		CSR_WRITE(sc, WMREG_EECD, reg);
   13061 		CSR_WRITE_FLUSH(sc);
   13062 		delay(2);
   13063 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13064 		CSR_WRITE_FLUSH(sc);
   13065 		delay(2);
   13066 		CSR_WRITE(sc, WMREG_EECD, reg);
   13067 		CSR_WRITE_FLUSH(sc);
   13068 		delay(2);
   13069 	}
   13070 }
   13071 
   13072 /*
   13073  * wm_eeprom_recvbits:
   13074  *
   13075  *	Receive a series of bits from the EEPROM.
   13076  */
   13077 static void
   13078 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13079 {
   13080 	uint32_t reg, val;
   13081 	int x;
   13082 
   13083 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13084 
   13085 	val = 0;
   13086 	for (x = nbits; x > 0; x--) {
   13087 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13088 		CSR_WRITE_FLUSH(sc);
   13089 		delay(2);
   13090 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13091 			val |= (1U << (x - 1));
   13092 		CSR_WRITE(sc, WMREG_EECD, reg);
   13093 		CSR_WRITE_FLUSH(sc);
   13094 		delay(2);
   13095 	}
   13096 	*valp = val;
   13097 }
   13098 
   13099 /* Microwire */
   13100 
   13101 /*
   13102  * wm_nvm_read_uwire:
   13103  *
   13104  *	Read a word from the EEPROM using the MicroWire protocol.
   13105  */
   13106 static int
   13107 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13108 {
   13109 	uint32_t reg, val;
   13110 	int i;
   13111 
   13112 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13113 		device_xname(sc->sc_dev), __func__));
   13114 
   13115 	if (sc->nvm.acquire(sc) != 0)
   13116 		return -1;
   13117 
   13118 	for (i = 0; i < wordcnt; i++) {
   13119 		/* Clear SK and DI. */
   13120 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13121 		CSR_WRITE(sc, WMREG_EECD, reg);
   13122 
   13123 		/*
   13124 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13125 		 * and Xen.
   13126 		 *
   13127 		 * We use this workaround only for 82540 because qemu's
   13128 		 * e1000 act as 82540.
   13129 		 */
   13130 		if (sc->sc_type == WM_T_82540) {
   13131 			reg |= EECD_SK;
   13132 			CSR_WRITE(sc, WMREG_EECD, reg);
   13133 			reg &= ~EECD_SK;
   13134 			CSR_WRITE(sc, WMREG_EECD, reg);
   13135 			CSR_WRITE_FLUSH(sc);
   13136 			delay(2);
   13137 		}
   13138 		/* XXX: end of workaround */
   13139 
   13140 		/* Set CHIP SELECT. */
   13141 		reg |= EECD_CS;
   13142 		CSR_WRITE(sc, WMREG_EECD, reg);
   13143 		CSR_WRITE_FLUSH(sc);
   13144 		delay(2);
   13145 
   13146 		/* Shift in the READ command. */
   13147 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13148 
   13149 		/* Shift in address. */
   13150 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13151 
   13152 		/* Shift out the data. */
   13153 		wm_eeprom_recvbits(sc, &val, 16);
   13154 		data[i] = val & 0xffff;
   13155 
   13156 		/* Clear CHIP SELECT. */
   13157 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13158 		CSR_WRITE(sc, WMREG_EECD, reg);
   13159 		CSR_WRITE_FLUSH(sc);
   13160 		delay(2);
   13161 	}
   13162 
   13163 	sc->nvm.release(sc);
   13164 	return 0;
   13165 }
   13166 
   13167 /* SPI */
   13168 
   13169 /*
   13170  * Set SPI and FLASH related information from the EECD register.
   13171  * For 82541 and 82547, the word size is taken from EEPROM.
   13172  */
   13173 static int
   13174 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13175 {
   13176 	int size;
   13177 	uint32_t reg;
   13178 	uint16_t data;
   13179 
   13180 	reg = CSR_READ(sc, WMREG_EECD);
   13181 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13182 
   13183 	/* Read the size of NVM from EECD by default */
   13184 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13185 	switch (sc->sc_type) {
   13186 	case WM_T_82541:
   13187 	case WM_T_82541_2:
   13188 	case WM_T_82547:
   13189 	case WM_T_82547_2:
   13190 		/* Set dummy value to access EEPROM */
   13191 		sc->sc_nvm_wordsize = 64;
   13192 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13193 			aprint_error_dev(sc->sc_dev,
   13194 			    "%s: failed to read EEPROM size\n", __func__);
   13195 		}
   13196 		reg = data;
   13197 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13198 		if (size == 0)
   13199 			size = 6; /* 64 word size */
   13200 		else
   13201 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13202 		break;
   13203 	case WM_T_80003:
   13204 	case WM_T_82571:
   13205 	case WM_T_82572:
   13206 	case WM_T_82573: /* SPI case */
   13207 	case WM_T_82574: /* SPI case */
   13208 	case WM_T_82583: /* SPI case */
   13209 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13210 		if (size > 14)
   13211 			size = 14;
   13212 		break;
   13213 	case WM_T_82575:
   13214 	case WM_T_82576:
   13215 	case WM_T_82580:
   13216 	case WM_T_I350:
   13217 	case WM_T_I354:
   13218 	case WM_T_I210:
   13219 	case WM_T_I211:
   13220 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13221 		if (size > 15)
   13222 			size = 15;
   13223 		break;
   13224 	default:
   13225 		aprint_error_dev(sc->sc_dev,
   13226 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13227 		return -1;
   13229 	}
   13230 
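	/* The size field is log2 of the word count, e.g. 6 -> 64 words. */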
   13231 	sc->sc_nvm_wordsize = 1 << size;
   13232 
   13233 	return 0;
   13234 }
   13235 
   13236 /*
   13237  * wm_nvm_ready_spi:
   13238  *
   13239  *	Wait for a SPI EEPROM to be ready for commands.
   13240  */
   13241 static int
   13242 wm_nvm_ready_spi(struct wm_softc *sc)
   13243 {
   13244 	uint32_t val;
   13245 	int usec;
   13246 
   13247 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13248 		device_xname(sc->sc_dev), __func__));
   13249 
   13250 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13251 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13252 		wm_eeprom_recvbits(sc, &val, 8);
   13253 		if ((val & SPI_SR_RDY) == 0)
   13254 			break;
   13255 	}
   13256 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   13258 		return -1;
   13259 	}
   13260 	return 0;
   13261 }
   13262 
   13263 /*
   13264  * wm_nvm_read_spi:
   13265  *
   13266  *	Read a work from the EEPROM using the SPI protocol.
   13267  */
   13268 static int
   13269 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13270 {
   13271 	uint32_t reg, val;
   13272 	int i;
   13273 	uint8_t opc;
   13274 	int rv = 0;
   13275 
   13276 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13277 		device_xname(sc->sc_dev), __func__));
   13278 
   13279 	if (sc->nvm.acquire(sc) != 0)
   13280 		return -1;
   13281 
   13282 	/* Clear SK and CS. */
   13283 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13284 	CSR_WRITE(sc, WMREG_EECD, reg);
   13285 	CSR_WRITE_FLUSH(sc);
   13286 	delay(2);
   13287 
   13288 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13289 		goto out;
   13290 
   13291 	/* Toggle CS to flush commands. */
   13292 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13293 	CSR_WRITE_FLUSH(sc);
   13294 	delay(2);
   13295 	CSR_WRITE(sc, WMREG_EECD, reg);
   13296 	CSR_WRITE_FLUSH(sc);
   13297 	delay(2);
   13298 
   13299 	opc = SPI_OPC_READ;
   13300 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13301 		opc |= SPI_OPC_A8;
   13302 
   13303 	wm_eeprom_sendbits(sc, opc, 8);
   13304 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13305 
   13306 	for (i = 0; i < wordcnt; i++) {
   13307 		wm_eeprom_recvbits(sc, &val, 16);
   13308 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13309 	}
   13310 
   13311 	/* Raise CS and clear SK. */
   13312 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13313 	CSR_WRITE(sc, WMREG_EECD, reg);
   13314 	CSR_WRITE_FLUSH(sc);
   13315 	delay(2);
   13316 
   13317 out:
   13318 	sc->nvm.release(sc);
   13319 	return rv;
   13320 }
   13321 
/* Reading using the EERD register */
   13323 
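/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (or EEWR) register until its DONE bit is set.
 *	Returns 0 on success and -1 on timeout (100000 * 5us = 500ms).
 */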
   13324 static int
   13325 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13326 {
   13327 	uint32_t attempts = 100000;
   13328 	uint32_t i, reg = 0;
   13329 	int32_t done = -1;
   13330 
   13331 	for (i = 0; i < attempts; i++) {
   13332 		reg = CSR_READ(sc, rw);
   13333 
   13334 		if (reg & EERD_DONE) {
   13335 			done = 0;
   13336 			break;
   13337 		}
   13338 		delay(5);
   13339 	}
   13340 
   13341 	return done;
   13342 }
   13343 
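/*
 * wm_nvm_read_eerd:
 *
 *	Read words through the EERD register: write the word address
 *	together with EERD_START, poll for completion and extract the
 *	data from the upper bits of EERD.
 */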
   13344 static int
   13345 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13346 {
   13347 	int i, eerd = 0;
   13348 	int rv = 0;
   13349 
   13350 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13351 		device_xname(sc->sc_dev), __func__));
   13352 
   13353 	if (sc->nvm.acquire(sc) != 0)
   13354 		return -1;
   13355 
   13356 	for (i = 0; i < wordcnt; i++) {
   13357 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13358 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13359 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13360 		if (rv != 0) {
   13361 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13362 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13363 			break;
   13364 		}
   13365 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13366 	}
   13367 
   13368 	sc->nvm.release(sc);
   13369 	return rv;
   13370 }
   13371 
   13372 /* Flash */
   13373 
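/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Decide which flash bank holds the valid NVM image by reading
 *	each bank's signature byte and comparing it against
 *	ICH_NVM_SIG_VALUE.  PCH_SPT and newer require dword flash
 *	accesses, hence the separate path; ICH8/ICH9 can take the
 *	answer directly from EECD when the SEC1VAL bits are valid.
 */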
   13374 static int
   13375 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13376 {
   13377 	uint32_t eecd;
   13378 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13379 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13380 	uint32_t nvm_dword = 0;
   13381 	uint8_t sig_byte = 0;
   13382 	int rv;
   13383 
   13384 	switch (sc->sc_type) {
   13385 	case WM_T_PCH_SPT:
   13386 	case WM_T_PCH_CNP:
   13387 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13388 		act_offset = ICH_NVM_SIG_WORD * 2;
   13389 
   13390 		/* Set bank to 0 in case flash read fails. */
   13391 		*bank = 0;
   13392 
   13393 		/* Check bank 0 */
   13394 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13395 		if (rv != 0)
   13396 			return rv;
   13397 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13398 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13399 			*bank = 0;
   13400 			return 0;
   13401 		}
   13402 
   13403 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13407 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13408 			*bank = 1;
   13409 			return 0;
   13410 		}
   13411 		aprint_error_dev(sc->sc_dev,
   13412 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13413 		return -1;
   13414 	case WM_T_ICH8:
   13415 	case WM_T_ICH9:
   13416 		eecd = CSR_READ(sc, WMREG_EECD);
   13417 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13418 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13419 			return 0;
   13420 		}
   13421 		/* FALLTHROUGH */
   13422 	default:
   13423 		/* Default to 0 */
   13424 		*bank = 0;
   13425 
   13426 		/* Check bank 0 */
   13427 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13428 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13429 			*bank = 0;
   13430 			return 0;
   13431 		}
   13432 
   13433 		/* Check bank 1 */
   13434 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13435 		    &sig_byte);
   13436 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13437 			*bank = 1;
   13438 			return 0;
   13439 		}
   13440 	}
   13441 
   13442 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13443 		device_xname(sc->sc_dev)));
   13444 	return -1;
   13445 }
   13446 
   13447 /******************************************************************************
   13448  * This function does initial flash setup so that a new read/write/erase cycle
   13449  * can be started.
   13450  *
   13451  * sc - The pointer to the hw structure
   13452  ****************************************************************************/
   13453 static int32_t
   13454 wm_ich8_cycle_init(struct wm_softc *sc)
   13455 {
   13456 	uint16_t hsfsts;
   13457 	int32_t error = 1;
   13458 	int32_t i     = 0;
   13459 
   13460 	if (sc->sc_type >= WM_T_PCH_SPT)
   13461 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13462 	else
   13463 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13464 
	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13466 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13467 		return error;
   13468 
	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   13471 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13472 
   13473 	if (sc->sc_type >= WM_T_PCH_SPT)
   13474 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13475 	else
   13476 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13477 
   13478 	/*
   13479 	 * Either we should have a hardware SPI cycle in progress bit to check
   13480 	 * against, in order to start a new cycle or FDONE bit should be
   13481 	 * changed in the hardware so that it is 1 after hardware reset, which
   13482 	 * can then be used as an indication whether a cycle is in progress or
   13483 	 * has been completed .. we should also have some software semaphore
   13484 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   13485 	 * threads access to those bits can be sequentiallized or a way so that
   13486 	 * 2 threads don't start the cycle at the same time
   13487 	 */
   13488 
   13489 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13490 		/*
   13491 		 * There is no cycle running at present, so we can start a
   13492 		 * cycle
   13493 		 */
   13494 
   13495 		/* Begin by setting Flash Cycle Done. */
   13496 		hsfsts |= HSFSTS_DONE;
   13497 		if (sc->sc_type >= WM_T_PCH_SPT)
   13498 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13499 			    hsfsts & 0xffffUL);
   13500 		else
   13501 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13502 		error = 0;
   13503 	} else {
   13504 		/*
		 * Otherwise poll for a while so the current cycle has a
   13506 		 * chance to end before giving up.
   13507 		 */
   13508 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13509 			if (sc->sc_type >= WM_T_PCH_SPT)
   13510 				hsfsts = ICH8_FLASH_READ32(sc,
   13511 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13512 			else
   13513 				hsfsts = ICH8_FLASH_READ16(sc,
   13514 				    ICH_FLASH_HSFSTS);
   13515 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13516 				error = 0;
   13517 				break;
   13518 			}
   13519 			delay(1);
   13520 		}
   13521 		if (error == 0) {
   13522 			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done.
   13525 			 */
   13526 			hsfsts |= HSFSTS_DONE;
   13527 			if (sc->sc_type >= WM_T_PCH_SPT)
   13528 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13529 				    hsfsts & 0xffffUL);
   13530 			else
   13531 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13532 				    hsfsts);
   13533 		}
   13534 	}
   13535 	return error;
   13536 }
   13537 
   13538 /******************************************************************************
   13539  * This function starts a flash cycle and waits for its completion
   13540  *
   13541  * sc - The pointer to the hw structure
   13542  ****************************************************************************/
   13543 static int32_t
   13544 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13545 {
   13546 	uint16_t hsflctl;
   13547 	uint16_t hsfsts;
   13548 	int32_t error = 1;
   13549 	uint32_t i = 0;
   13550 
   13551 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13552 	if (sc->sc_type >= WM_T_PCH_SPT)
   13553 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13554 	else
   13555 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13556 	hsflctl |= HSFCTL_GO;
   13557 	if (sc->sc_type >= WM_T_PCH_SPT)
   13558 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13559 		    (uint32_t)hsflctl << 16);
   13560 	else
   13561 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13562 
	/* Wait until the FDONE bit is set to 1 */
   13564 	do {
   13565 		if (sc->sc_type >= WM_T_PCH_SPT)
   13566 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13567 			    & 0xffffUL;
   13568 		else
   13569 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13570 		if (hsfsts & HSFSTS_DONE)
   13571 			break;
   13572 		delay(1);
   13573 		i++;
   13574 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13576 		error = 0;
   13577 
   13578 	return error;
   13579 }
   13580 
   13581 /******************************************************************************
   13582  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13583  *
   13584  * sc - The pointer to the hw structure
   13585  * index - The index of the byte or word to read.
   13586  * size - Size of data to read, 1=byte 2=word, 4=dword
   13587  * data - Pointer to the word to store the value read.
   13588  *****************************************************************************/
   13589 static int32_t
   13590 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13591     uint32_t size, uint32_t *data)
   13592 {
   13593 	uint16_t hsfsts;
   13594 	uint16_t hsflctl;
   13595 	uint32_t flash_linear_address;
   13596 	uint32_t flash_data = 0;
   13597 	int32_t error = 1;
   13598 	int32_t count = 0;
   13599 
	if (size < 1 || size > 4 || data == NULL ||
   13601 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13602 		return error;
   13603 
   13604 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13605 	    sc->sc_ich8_flash_base;
   13606 
   13607 	do {
   13608 		delay(1);
   13609 		/* Steps */
   13610 		error = wm_ich8_cycle_init(sc);
   13611 		if (error)
   13612 			break;
   13613 
   13614 		if (sc->sc_type >= WM_T_PCH_SPT)
   13615 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13616 			    >> 16;
   13617 		else
   13618 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. */
   13620 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13621 		    & HSFCTL_BCOUNT_MASK;
   13622 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13623 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13624 			/*
			 * On SPT, this register is in LAN memory space,
			 * not flash, so only 32-bit access is supported.
   13627 			 */
   13628 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13629 			    (uint32_t)hsflctl << 16);
   13630 		} else
   13631 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13632 
   13633 		/*
   13634 		 * Write the last 24 bits of index into Flash Linear address
   13635 		 * field in Flash Address
   13636 		 */
   13637 		/* TODO: TBD maybe check the index against the size of flash */
   13638 
   13639 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13640 
   13641 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13642 
   13643 		/*
   13644 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   13645 		 * the whole sequence a few more times, else read in (shift in)
   13646 		 * the Flash Data0, the order is least significant byte first
   13647 		 * msb to lsb
   13648 		 */
   13649 		if (error == 0) {
   13650 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13651 			if (size == 1)
   13652 				*data = (uint8_t)(flash_data & 0x000000FF);
   13653 			else if (size == 2)
   13654 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13655 			else if (size == 4)
   13656 				*data = (uint32_t)flash_data;
   13657 			break;
   13658 		} else {
   13659 			/*
   13660 			 * If we've gotten here, then things are probably
   13661 			 * completely hosed, but if the error condition is
   13662 			 * detected, it won't hurt to give it another try...
   13663 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13664 			 */
   13665 			if (sc->sc_type >= WM_T_PCH_SPT)
   13666 				hsfsts = ICH8_FLASH_READ32(sc,
   13667 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13668 			else
   13669 				hsfsts = ICH8_FLASH_READ16(sc,
   13670 				    ICH_FLASH_HSFSTS);
   13671 
   13672 			if (hsfsts & HSFSTS_ERR) {
   13673 				/* Repeat for some time before giving up. */
   13674 				continue;
   13675 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13676 				break;
   13677 		}
   13678 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13679 
   13680 	return error;
   13681 }
   13682 
   13683 /******************************************************************************
   13684  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13685  *
   13686  * sc - pointer to wm_hw structure
   13687  * index - The index of the byte to read.
   13688  * data - Pointer to a byte to store the value read.
   13689  *****************************************************************************/
   13690 static int32_t
   13691 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13692 {
   13693 	int32_t status;
   13694 	uint32_t word = 0;
   13695 
   13696 	status = wm_read_ich8_data(sc, index, 1, &word);
   13697 	if (status == 0)
   13698 		*data = (uint8_t)word;
   13699 	else
   13700 		*data = 0;
   13701 
   13702 	return status;
   13703 }
   13704 
   13705 /******************************************************************************
   13706  * Reads a word from the NVM using the ICH8 flash access registers.
   13707  *
   13708  * sc - pointer to wm_hw structure
   13709  * index - The starting byte index of the word to read.
   13710  * data - Pointer to a word to store the value read.
   13711  *****************************************************************************/
   13712 static int32_t
   13713 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13714 {
   13715 	int32_t status;
   13716 	uint32_t word = 0;
   13717 
   13718 	status = wm_read_ich8_data(sc, index, 2, &word);
   13719 	if (status == 0)
   13720 		*data = (uint16_t)word;
   13721 	else
   13722 		*data = 0;
   13723 
   13724 	return status;
   13725 }
   13726 
   13727 /******************************************************************************
   13728  * Reads a dword from the NVM using the ICH8 flash access registers.
   13729  *
   13730  * sc - pointer to wm_hw structure
   13731  * index - The starting byte index of the word to read.
   13732  * data - Pointer to a word to store the value read.
   13733  *****************************************************************************/
   13734 static int32_t
   13735 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13736 {
   13737 	int32_t status;
   13738 
   13739 	status = wm_read_ich8_data(sc, index, 4, data);
   13740 	return status;
   13741 }
   13742 
   13743 /******************************************************************************
   13744  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13745  * register.
   13746  *
   13747  * sc - Struct containing variables accessed by shared code
   13748  * offset - offset of word in the EEPROM to read
   13749  * data - word read from the EEPROM
   13750  * words - number of words to read
   13751  *****************************************************************************/
   13752 static int
   13753 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13754 {
   13755 	int32_t	 rv = 0;
   13756 	uint32_t flash_bank = 0;
   13757 	uint32_t act_offset = 0;
   13758 	uint32_t bank_offset = 0;
   13759 	uint16_t word = 0;
   13760 	uint16_t i = 0;
   13761 
   13762 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13763 		device_xname(sc->sc_dev), __func__));
   13764 
   13765 	if (sc->nvm.acquire(sc) != 0)
   13766 		return -1;
   13767 
   13768 	/*
   13769 	 * We need to know which is the valid flash bank.  In the event
   13770 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13771 	 * managing flash_bank. So it cannot be trusted and needs
   13772 	 * to be updated with each read.
   13773 	 */
   13774 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13775 	if (rv) {
   13776 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13777 			device_xname(sc->sc_dev)));
   13778 		flash_bank = 0;
   13779 	}
   13780 
   13781 	/*
   13782 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13783 	 * size
   13784 	 */
   13785 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13786 
   13787 	for (i = 0; i < words; i++) {
   13788 		/* The NVM part needs a byte offset, hence * 2 */
   13789 		act_offset = bank_offset + ((offset + i) * 2);
   13790 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13791 		if (rv) {
   13792 			aprint_error_dev(sc->sc_dev,
   13793 			    "%s: failed to read NVM\n", __func__);
   13794 			break;
   13795 		}
   13796 		data[i] = word;
   13797 	}
   13798 
   13799 	sc->nvm.release(sc);
   13800 	return rv;
   13801 }
   13802 
   13803 /******************************************************************************
   13804  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13805  * register.
   13806  *
   13807  * sc - Struct containing variables accessed by shared code
   13808  * offset - offset of word in the EEPROM to read
   13809  * data - word read from the EEPROM
   13810  * words - number of words to read
   13811  *****************************************************************************/
   13812 static int
   13813 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13814 {
   13815 	int32_t	 rv = 0;
   13816 	uint32_t flash_bank = 0;
   13817 	uint32_t act_offset = 0;
   13818 	uint32_t bank_offset = 0;
   13819 	uint32_t dword = 0;
   13820 	uint16_t i = 0;
   13821 
   13822 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13823 		device_xname(sc->sc_dev), __func__));
   13824 
   13825 	if (sc->nvm.acquire(sc) != 0)
   13826 		return -1;
   13827 
   13828 	/*
   13829 	 * We need to know which is the valid flash bank.  In the event
   13830 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13831 	 * managing flash_bank. So it cannot be trusted and needs
   13832 	 * to be updated with each read.
   13833 	 */
   13834 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13835 	if (rv) {
   13836 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13837 			device_xname(sc->sc_dev)));
   13838 		flash_bank = 0;
   13839 	}
   13840 
   13841 	/*
   13842 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13843 	 * size
   13844 	 */
   13845 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13846 
   13847 	for (i = 0; i < words; i++) {
   13848 		/* The NVM part needs a byte offset, hence * 2 */
   13849 		act_offset = bank_offset + ((offset + i) * 2);
   13850 		/* but we must read dword aligned, so mask ... */
   13851 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13852 		if (rv) {
   13853 			aprint_error_dev(sc->sc_dev,
   13854 			    "%s: failed to read NVM\n", __func__);
   13855 			break;
   13856 		}
   13857 		/* ... and pick out low or high word */
   13858 		if ((act_offset & 0x2) == 0)
   13859 			data[i] = (uint16_t)(dword & 0xFFFF);
   13860 		else
   13861 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13862 	}
   13863 
   13864 	sc->nvm.release(sc);
   13865 	return rv;
   13866 }
   13867 
   13868 /* iNVM */
   13869 
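/*
 * wm_nvm_read_word_invm:
 *
 *	Walk the iNVM (OTP) dwords looking for a WORD_AUTOLOAD record
 *	whose word address matches, skipping over CSR autoload and RSA
 *	key records.  Returns 0 and fills *data on a match, nonzero if
 *	the word isn't mapped.
 */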
   13870 static int
   13871 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13872 {
	int32_t	 rv = -1;	/* Return nonzero unless the word is found */
   13874 	uint32_t invm_dword;
   13875 	uint16_t i;
   13876 	uint8_t record_type, word_address;
   13877 
   13878 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13879 		device_xname(sc->sc_dev), __func__));
   13880 
   13881 	for (i = 0; i < INVM_SIZE; i++) {
   13882 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13883 		/* Get record type */
   13884 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13885 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13886 			break;
   13887 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13888 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13889 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13890 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13891 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13892 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13893 			if (word_address == address) {
   13894 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13895 				rv = 0;
   13896 				break;
   13897 			}
   13898 		}
   13899 	}
   13900 
   13901 	return rv;
   13902 }
   13903 
   13904 static int
   13905 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13906 {
   13907 	int rv = 0;
   13908 	int i;
   13909 
   13910 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13911 		device_xname(sc->sc_dev), __func__));
   13912 
   13913 	if (sc->nvm.acquire(sc) != 0)
   13914 		return -1;
   13915 
   13916 	for (i = 0; i < words; i++) {
   13917 		switch (offset + i) {
   13918 		case NVM_OFF_MACADDR:
   13919 		case NVM_OFF_MACADDR1:
   13920 		case NVM_OFF_MACADDR2:
   13921 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13922 			if (rv != 0) {
   13923 				data[i] = 0xffff;
   13924 				rv = -1;
   13925 			}
   13926 			break;
   13927 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13928 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13929 			if (rv != 0) {
   13930 				*data = INVM_DEFAULT_AL;
   13931 				rv = 0;
   13932 			}
   13933 			break;
   13934 		case NVM_OFF_CFG2:
   13935 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13936 			if (rv != 0) {
   13937 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13938 				rv = 0;
   13939 			}
   13940 			break;
   13941 		case NVM_OFF_CFG4:
   13942 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13943 			if (rv != 0) {
   13944 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13945 				rv = 0;
   13946 			}
   13947 			break;
   13948 		case NVM_OFF_LED_1_CFG:
   13949 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13950 			if (rv != 0) {
   13951 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13952 				rv = 0;
   13953 			}
   13954 			break;
   13955 		case NVM_OFF_LED_0_2_CFG:
   13956 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13957 			if (rv != 0) {
   13958 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13959 				rv = 0;
   13960 			}
   13961 			break;
   13962 		case NVM_OFF_ID_LED_SETTINGS:
   13963 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13964 			if (rv != 0) {
   13965 				*data = ID_LED_RESERVED_FFFF;
   13966 				rv = 0;
   13967 			}
   13968 			break;
   13969 		default:
   13970 			DPRINTF(sc, WM_DEBUG_NVM,
   13971 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13972 			*data = NVM_RESERVED_WORD;
   13973 			break;
   13974 		}
   13975 	}
   13976 
   13977 	sc->nvm.release(sc);
   13978 	return rv;
   13979 }
   13980 
/* Locking, NVM type detection, checksum validation, version and read */
   13982 
   13983 static int
   13984 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13985 {
   13986 	uint32_t eecd = 0;
   13987 
   13988 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13989 	    || sc->sc_type == WM_T_82583) {
   13990 		eecd = CSR_READ(sc, WMREG_EECD);
   13991 
   13992 		/* Isolate bits 15 & 16 */
   13993 		eecd = ((eecd >> 15) & 0x03);
   13994 
   13995 		/* If both bits are set, device is Flash type */
   13996 		if (eecd == 0x03)
   13997 			return 0;
   13998 	}
   13999 	return 1;
   14000 }
   14001 
   14002 static int
   14003 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14004 {
   14005 	uint32_t eec;
   14006 
   14007 	eec = CSR_READ(sc, WMREG_EEC);
   14008 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14009 		return 1;
   14010 
   14011 	return 0;
   14012 }
   14013 
   14014 /*
   14015  * wm_nvm_validate_checksum
   14016  *
   14017  * The checksum is defined as the sum of the first 64 (16 bit) words.
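 *
 * A valid image sums to NVM_CHECKSUM.  Note that a mismatch is only
 * logged (under WM_DEBUG) and this function still returns 0.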
   14018  */
   14019 static int
   14020 wm_nvm_validate_checksum(struct wm_softc *sc)
   14021 {
   14022 	uint16_t checksum;
   14023 	uint16_t eeprom_data;
   14024 #ifdef WM_DEBUG
   14025 	uint16_t csum_wordaddr, valid_checksum;
   14026 #endif
   14027 	int i;
   14028 
   14029 	checksum = 0;
   14030 
   14031 	/* Don't check for I211 */
   14032 	if (sc->sc_type == WM_T_I211)
   14033 		return 0;
   14034 
   14035 #ifdef WM_DEBUG
   14036 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14037 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14038 		csum_wordaddr = NVM_OFF_COMPAT;
   14039 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14040 	} else {
   14041 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14042 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14043 	}
   14044 
   14045 	/* Dump EEPROM image for debug */
   14046 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14047 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14048 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14049 		/* XXX PCH_SPT? */
   14050 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14051 		if ((eeprom_data & valid_checksum) == 0)
   14052 			DPRINTF(sc, WM_DEBUG_NVM,
   14053 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14054 				device_xname(sc->sc_dev), eeprom_data,
   14055 				    valid_checksum));
   14056 	}
   14057 
   14058 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14059 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14060 		for (i = 0; i < NVM_SIZE; i++) {
   14061 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14062 				printf("XXXX ");
   14063 			else
   14064 				printf("%04hx ", eeprom_data);
   14065 			if (i % 8 == 7)
   14066 				printf("\n");
   14067 		}
   14068 	}
   14069 
   14070 #endif /* WM_DEBUG */
   14071 
   14072 	for (i = 0; i < NVM_SIZE; i++) {
   14073 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14074 			return 1;
   14075 		checksum += eeprom_data;
   14076 	}
   14077 
   14078 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14079 #ifdef WM_DEBUG
   14080 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14081 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14082 #endif
   14083 	}
   14084 
   14085 	return 0;
   14086 }
   14087 
   14088 static void
   14089 wm_nvm_version_invm(struct wm_softc *sc)
   14090 {
   14091 	uint32_t dword;
   14092 
   14093 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.
   14096 	 * Perhaps it's not perfect though...
   14097 	 *
   14098 	 * Example:
   14099 	 *
   14100 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14101 	 */
   14102 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14103 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14104 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14105 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14106 }
   14107 
   14108 static void
   14109 wm_nvm_version(struct wm_softc *sc)
   14110 {
   14111 	uint16_t major, minor, build, patch;
   14112 	uint16_t uid0, uid1;
   14113 	uint16_t nvm_data;
   14114 	uint16_t off;
   14115 	bool check_version = false;
   14116 	bool check_optionrom = false;
   14117 	bool have_build = false;
   14118 	bool have_uid = true;
   14119 
   14120 	/*
   14121 	 * Version format:
   14122 	 *
   14123 	 * XYYZ
   14124 	 * X0YZ
   14125 	 * X0YY
   14126 	 *
   14127 	 * Example:
   14128 	 *
   14129 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14130 	 *	82571	0x50a6	5.10.6?
   14131 	 *	82572	0x506a	5.6.10?
   14132 	 *	82572EI	0x5069	5.6.9?
   14133 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14134 	 *		0x2013	2.1.3?
   14135 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14136 	 * ICH8+82567	0x0040	0.4.0?
   14137 	 * ICH9+82566	0x1040	1.4.0?
   14138 	 *ICH10+82567	0x0043	0.4.3?
   14139 	 *  PCH+82577	0x00c1	0.12.1?
   14140 	 * PCH2+82579	0x00d3	0.13.3?
   14141 	 *		0x00d4	0.13.4?
   14142 	 *  LPT+I218	0x0023	0.2.3?
   14143 	 *  SPT+I219	0x0084	0.8.4?
   14144 	 *  CNP+I219	0x0054	0.5.4?
   14145 	 */
   14146 
   14147 	/*
   14148 	 * XXX
   14149 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14151 	 */
   14152 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14153 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14154 		have_uid = false;
   14155 
   14156 	switch (sc->sc_type) {
   14157 	case WM_T_82571:
   14158 	case WM_T_82572:
   14159 	case WM_T_82574:
   14160 	case WM_T_82583:
   14161 		check_version = true;
   14162 		check_optionrom = true;
   14163 		have_build = true;
   14164 		break;
   14165 	case WM_T_ICH8:
   14166 	case WM_T_ICH9:
   14167 	case WM_T_ICH10:
   14168 	case WM_T_PCH:
   14169 	case WM_T_PCH2:
   14170 	case WM_T_PCH_LPT:
   14171 	case WM_T_PCH_SPT:
   14172 	case WM_T_PCH_CNP:
   14173 		check_version = true;
   14174 		have_build = true;
   14175 		have_uid = false;
   14176 		break;
   14177 	case WM_T_82575:
   14178 	case WM_T_82576:
   14179 	case WM_T_82580:
   14180 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14181 			check_version = true;
   14182 		break;
   14183 	case WM_T_I211:
   14184 		wm_nvm_version_invm(sc);
   14185 		have_uid = false;
   14186 		goto printver;
   14187 	case WM_T_I210:
   14188 		if (!wm_nvm_flash_presence_i210(sc)) {
   14189 			wm_nvm_version_invm(sc);
   14190 			have_uid = false;
   14191 			goto printver;
   14192 		}
   14193 		/* FALLTHROUGH */
   14194 	case WM_T_I350:
   14195 	case WM_T_I354:
   14196 		check_version = true;
   14197 		check_optionrom = true;
   14198 		break;
   14199 	default:
   14200 		return;
   14201 	}
   14202 	if (check_version
   14203 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14204 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14205 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14206 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14207 			build = nvm_data & NVM_BUILD_MASK;
   14208 			have_build = true;
   14209 		} else
   14210 			minor = nvm_data & 0x00ff;
   14211 
   14212 		/* Decimal */
   14213 		minor = (minor / 16) * 10 + (minor % 16);
   14214 		sc->sc_nvm_ver_major = major;
   14215 		sc->sc_nvm_ver_minor = minor;
   14216 
   14217 printver:
   14218 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14219 		    sc->sc_nvm_ver_minor);
   14220 		if (have_build) {
   14221 			sc->sc_nvm_ver_build = build;
   14222 			aprint_verbose(".%d", build);
   14223 		}
   14224 	}
   14225 
	/* Assume the Option ROM area is above NVM_SIZE */
   14227 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14228 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14229 		/* Option ROM Version */
   14230 		if ((off != 0x0000) && (off != 0xffff)) {
   14231 			int rv;
   14232 
   14233 			off += NVM_COMBO_VER_OFF;
   14234 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14235 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14236 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14237 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14238 				/* 16bits */
   14239 				major = uid0 >> 8;
   14240 				build = (uid0 << 8) | (uid1 >> 8);
   14241 				patch = uid1 & 0x00ff;
   14242 				aprint_verbose(", option ROM Version %d.%d.%d",
   14243 				    major, build, patch);
   14244 			}
   14245 		}
   14246 	}
   14247 
   14248 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14249 		aprint_verbose(", Image Unique ID %08x",
   14250 		    ((uint32_t)uid1 << 16) | uid0);
   14251 }
   14252 
   14253 /*
   14254  * wm_nvm_read:
   14255  *
   14256  *	Read data from the serial EEPROM.
   14257  */
   14258 static int
   14259 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14260 {
   14261 	int rv;
   14262 
   14263 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14264 		device_xname(sc->sc_dev), __func__));
   14265 
   14266 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14267 		return -1;
   14268 
   14269 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14270 
   14271 	return rv;
   14272 }
   14273 
   14274 /*
   14275  * Hardware semaphores.
 * Very complex...
   14277  */
   14278 
   14279 static int
   14280 wm_get_null(struct wm_softc *sc)
   14281 {
   14282 
   14283 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14284 		device_xname(sc->sc_dev), __func__));
   14285 	return 0;
   14286 }
   14287 
   14288 static void
   14289 wm_put_null(struct wm_softc *sc)
   14290 {
   14291 
   14292 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14293 		device_xname(sc->sc_dev), __func__));
   14294 	return;
   14295 }
   14296 
   14297 static int
   14298 wm_get_eecd(struct wm_softc *sc)
   14299 {
   14300 	uint32_t reg;
   14301 	int x;
   14302 
   14303 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14304 		device_xname(sc->sc_dev), __func__));
   14305 
   14306 	reg = CSR_READ(sc, WMREG_EECD);
   14307 
   14308 	/* Request EEPROM access. */
   14309 	reg |= EECD_EE_REQ;
   14310 	CSR_WRITE(sc, WMREG_EECD, reg);
   14311 
	/* ... and wait for it to be granted. */
   14313 	for (x = 0; x < 1000; x++) {
   14314 		reg = CSR_READ(sc, WMREG_EECD);
   14315 		if (reg & EECD_EE_GNT)
   14316 			break;
   14317 		delay(5);
   14318 	}
   14319 	if ((reg & EECD_EE_GNT) == 0) {
   14320 		aprint_error_dev(sc->sc_dev,
   14321 		    "could not acquire EEPROM GNT\n");
   14322 		reg &= ~EECD_EE_REQ;
   14323 		CSR_WRITE(sc, WMREG_EECD, reg);
   14324 		return -1;
   14325 	}
   14326 
   14327 	return 0;
   14328 }
   14329 
   14330 static void
   14331 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14332 {
   14333 
   14334 	*eecd |= EECD_SK;
   14335 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14336 	CSR_WRITE_FLUSH(sc);
   14337 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14338 		delay(1);
   14339 	else
   14340 		delay(50);
   14341 }
   14342 
   14343 static void
   14344 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14345 {
   14346 
   14347 	*eecd &= ~EECD_SK;
   14348 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14349 	CSR_WRITE_FLUSH(sc);
   14350 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14351 		delay(1);
   14352 	else
   14353 		delay(50);
   14354 }
   14355 
   14356 static void
   14357 wm_put_eecd(struct wm_softc *sc)
   14358 {
   14359 	uint32_t reg;
   14360 
   14361 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14362 		device_xname(sc->sc_dev), __func__));
   14363 
   14364 	/* Stop nvm */
   14365 	reg = CSR_READ(sc, WMREG_EECD);
   14366 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14367 		/* Pull CS high */
   14368 		reg |= EECD_CS;
   14369 		wm_nvm_eec_clock_lower(sc, &reg);
   14370 	} else {
   14371 		/* CS on Microwire is active-high */
   14372 		reg &= ~(EECD_CS | EECD_DI);
   14373 		CSR_WRITE(sc, WMREG_EECD, reg);
   14374 		wm_nvm_eec_clock_raise(sc, &reg);
   14375 		wm_nvm_eec_clock_lower(sc, &reg);
   14376 	}
   14377 
   14378 	reg = CSR_READ(sc, WMREG_EECD);
   14379 	reg &= ~EECD_EE_REQ;
   14380 	CSR_WRITE(sc, WMREG_EECD, reg);
   14381 
   14382 	return;
   14383 }
   14384 
   14385 /*
   14386  * Get hardware semaphore.
   14387  * Same as e1000_get_hw_semaphore_generic()
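 *
 * Two stages: first wait for the hardware-arbitrated SMBI bit to read
 * back clear, then claim SWESMBI by writing it and reading it back.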
   14388  */
   14389 static int
   14390 wm_get_swsm_semaphore(struct wm_softc *sc)
   14391 {
   14392 	int32_t timeout;
   14393 	uint32_t swsm;
   14394 
   14395 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14396 		device_xname(sc->sc_dev), __func__));
   14397 	KASSERT(sc->sc_nvm_wordsize > 0);
   14398 
   14399 retry:
   14400 	/* Get the SW semaphore. */
   14401 	timeout = sc->sc_nvm_wordsize + 1;
   14402 	while (timeout) {
   14403 		swsm = CSR_READ(sc, WMREG_SWSM);
   14404 
   14405 		if ((swsm & SWSM_SMBI) == 0)
   14406 			break;
   14407 
   14408 		delay(50);
   14409 		timeout--;
   14410 	}
   14411 
   14412 	if (timeout == 0) {
   14413 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14414 			/*
   14415 			 * In rare circumstances, the SW semaphore may already
   14416 			 * be held unintentionally. Clear the semaphore once
   14417 			 * before giving up.
   14418 			 */
   14419 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14420 			wm_put_swsm_semaphore(sc);
   14421 			goto retry;
   14422 		}
   14423 		aprint_error_dev(sc->sc_dev,
   14424 		    "could not acquire SWSM SMBI\n");
   14425 		return 1;
   14426 	}
   14427 
   14428 	/* Get the FW semaphore. */
   14429 	timeout = sc->sc_nvm_wordsize + 1;
   14430 	while (timeout) {
   14431 		swsm = CSR_READ(sc, WMREG_SWSM);
   14432 		swsm |= SWSM_SWESMBI;
   14433 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14434 		/* If we managed to set the bit we got the semaphore. */
   14435 		swsm = CSR_READ(sc, WMREG_SWSM);
   14436 		if (swsm & SWSM_SWESMBI)
   14437 			break;
   14438 
   14439 		delay(50);
   14440 		timeout--;
   14441 	}
   14442 
   14443 	if (timeout == 0) {
   14444 		aprint_error_dev(sc->sc_dev,
   14445 		    "could not acquire SWSM SWESMBI\n");
   14446 		/* Release semaphores */
   14447 		wm_put_swsm_semaphore(sc);
   14448 		return 1;
   14449 	}
   14450 	return 0;
   14451 }
   14452 
   14453 /*
   14454  * Put hardware semaphore.
   14455  * Same as e1000_put_hw_semaphore_generic()
   14456  */
   14457 static void
   14458 wm_put_swsm_semaphore(struct wm_softc *sc)
   14459 {
   14460 	uint32_t swsm;
   14461 
   14462 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14463 		device_xname(sc->sc_dev), __func__));
   14464 
   14465 	swsm = CSR_READ(sc, WMREG_SWSM);
   14466 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14467 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14468 }
   14469 
   14470 /*
   14471  * Get SW/FW semaphore.
   14472  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14473  */
   14474 static int
   14475 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14476 {
   14477 	uint32_t swfw_sync;
   14478 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14479 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14480 	int timeout;
   14481 
   14482 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14483 		device_xname(sc->sc_dev), __func__));
   14484 
   14485 	if (sc->sc_type == WM_T_80003)
   14486 		timeout = 50;
   14487 	else
   14488 		timeout = 200;
   14489 
   14490 	while (timeout) {
   14491 		if (wm_get_swsm_semaphore(sc)) {
   14492 			aprint_error_dev(sc->sc_dev,
   14493 			    "%s: failed to get semaphore\n",
   14494 			    __func__);
   14495 			return 1;
   14496 		}
   14497 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14498 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14499 			swfw_sync |= swmask;
   14500 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14501 			wm_put_swsm_semaphore(sc);
   14502 			return 0;
   14503 		}
   14504 		wm_put_swsm_semaphore(sc);
   14505 		delay(5000);
   14506 		timeout--;
   14507 	}
   14508 	device_printf(sc->sc_dev,
   14509 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14510 	    mask, swfw_sync);
   14511 	return 1;
   14512 }
   14513 
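          /*
           * Put SW/FW semaphore.
           * Pair of wm_get_swfw_semaphore().
           */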
   14514 static void
   14515 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14516 {
   14517 	uint32_t swfw_sync;
   14518 
   14519 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14520 		device_xname(sc->sc_dev), __func__));
   14521 
   14522 	while (wm_get_swsm_semaphore(sc) != 0)
   14523 		continue;
   14524 
   14525 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14526 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14527 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14528 
   14529 	wm_put_swsm_semaphore(sc);
   14530 }
   14531 
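          /*
           * Get the NVM resource on 80003: take the SWFW EEPROM semaphore
           * and, if required, the EECD register lock as well.
           */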
   14532 static int
   14533 wm_get_nvm_80003(struct wm_softc *sc)
   14534 {
   14535 	int rv;
   14536 
   14537 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14538 		device_xname(sc->sc_dev), __func__));
   14539 
   14540 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14541 		aprint_error_dev(sc->sc_dev,
   14542 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14543 		return rv;
   14544 	}
   14545 
   14546 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14547 	    && (rv = wm_get_eecd(sc)) != 0) {
   14548 		aprint_error_dev(sc->sc_dev,
   14549 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14550 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14551 		return rv;
   14552 	}
   14553 
   14554 	return 0;
   14555 }
   14556 
   14557 static void
   14558 wm_put_nvm_80003(struct wm_softc *sc)
   14559 {
   14560 
   14561 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14562 		device_xname(sc->sc_dev), __func__));
   14563 
   14564 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14565 		wm_put_eecd(sc);
   14566 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14567 }
   14568 
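          /*
           * Get the NVM resource on the 82571 family: take the SWSM
           * semaphore and, except on the 82573, the EECD register lock too.
           */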
   14569 static int
   14570 wm_get_nvm_82571(struct wm_softc *sc)
   14571 {
   14572 	int rv;
   14573 
   14574 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14575 		device_xname(sc->sc_dev), __func__));
   14576 
   14577 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14578 		return rv;
   14579 
   14580 	switch (sc->sc_type) {
   14581 	case WM_T_82573:
   14582 		break;
   14583 	default:
   14584 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14585 			rv = wm_get_eecd(sc);
   14586 		break;
   14587 	}
   14588 
   14589 	if (rv != 0) {
   14590 		aprint_error_dev(sc->sc_dev,
   14591 		    "%s: failed to get semaphore\n",
   14592 		    __func__);
   14593 		wm_put_swsm_semaphore(sc);
   14594 	}
   14595 
   14596 	return rv;
   14597 }
   14598 
   14599 static void
   14600 wm_put_nvm_82571(struct wm_softc *sc)
   14601 {
   14602 
   14603 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14604 		device_xname(sc->sc_dev), __func__));
   14605 
   14606 	switch (sc->sc_type) {
   14607 	case WM_T_82573:
   14608 		break;
   14609 	default:
   14610 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14611 			wm_put_eecd(sc);
   14612 		break;
   14613 	}
   14614 
   14615 	wm_put_swsm_semaphore(sc);
   14616 }
   14617 
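          /*
           * Get the PHY semaphore on 82575 and newer: the SW/FW semaphore
           * bit to use is selected by the PCI function number.
           */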
   14618 static int
   14619 wm_get_phy_82575(struct wm_softc *sc)
   14620 {
   14621 
   14622 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14623 		device_xname(sc->sc_dev), __func__));
   14624 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14625 }
   14626 
   14627 static void
   14628 wm_put_phy_82575(struct wm_softc *sc)
   14629 {
   14630 
   14631 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14632 		device_xname(sc->sc_dev), __func__));
   14633 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14634 }
   14635 
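          /*
           * Get the SW/FW/HW semaphore by setting the MDIO software
           * ownership bit in EXTCNFCTR (protected by the ICH PHY mutex).
           */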
   14636 static int
   14637 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14638 {
   14639 	uint32_t ext_ctrl;
    14640 	int timeout;
   14641 
   14642 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14643 		device_xname(sc->sc_dev), __func__));
   14644 
   14645 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14646 	for (timeout = 0; timeout < 200; timeout++) {
   14647 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14648 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14649 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14650 
   14651 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14652 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14653 			return 0;
   14654 		delay(5000);
   14655 	}
   14656 	device_printf(sc->sc_dev,
   14657 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14658 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14659 	return 1;
   14660 }
   14661 
   14662 static void
   14663 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14664 {
   14665 	uint32_t ext_ctrl;
   14666 
   14667 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14668 		device_xname(sc->sc_dev), __func__));
   14669 
   14670 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14671 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14672 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14673 
   14674 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14675 }
   14676 
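          /*
           * Get the software flag on ICH8 and newer: wait for the MDIO
           * software ownership bit to be free, then claim it and verify.
           */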
   14677 static int
   14678 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14679 {
   14680 	uint32_t ext_ctrl;
   14681 	int timeout;
   14682 
   14683 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14684 		device_xname(sc->sc_dev), __func__));
   14685 	mutex_enter(sc->sc_ich_phymtx);
   14686 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14687 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14688 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14689 			break;
   14690 		delay(1000);
   14691 	}
   14692 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14693 		device_printf(sc->sc_dev,
   14694 		    "SW has already locked the resource\n");
   14695 		goto out;
   14696 	}
   14697 
   14698 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14699 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14700 	for (timeout = 0; timeout < 1000; timeout++) {
   14701 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14702 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14703 			break;
   14704 		delay(1000);
   14705 	}
   14706 	if (timeout >= 1000) {
   14707 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14708 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14709 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14710 		goto out;
   14711 	}
   14712 	return 0;
   14713 
   14714 out:
   14715 	mutex_exit(sc->sc_ich_phymtx);
   14716 	return 1;
   14717 }
   14718 
   14719 static void
   14720 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14721 {
   14722 	uint32_t ext_ctrl;
   14723 
   14724 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14725 		device_xname(sc->sc_dev), __func__));
   14726 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14727 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14728 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14729 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14730 	} else {
   14731 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14732 	}
   14733 
   14734 	mutex_exit(sc->sc_ich_phymtx);
   14735 }
   14736 
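          /* On ICH8 and newer, NVM access is serialized with a mutex. */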
   14737 static int
   14738 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14739 {
   14740 
   14741 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14742 		device_xname(sc->sc_dev), __func__));
   14743 	mutex_enter(sc->sc_ich_nvmmtx);
   14744 
   14745 	return 0;
   14746 }
   14747 
   14748 static void
   14749 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14750 {
   14751 
   14752 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14753 		device_xname(sc->sc_dev), __func__));
   14754 	mutex_exit(sc->sc_ich_nvmmtx);
   14755 }
   14756 
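          /*
           * Get the hardware semaphore on 82573-class devices via the MDIO
           * software ownership bit in EXTCNFCTR.
           */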
   14757 static int
   14758 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14759 {
   14760 	int i = 0;
   14761 	uint32_t reg;
   14762 
   14763 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14764 		device_xname(sc->sc_dev), __func__));
   14765 
   14766 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14767 	do {
   14768 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14769 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14770 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14771 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14772 			break;
   14773 		delay(2*1000);
   14774 		i++;
   14775 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14776 
   14777 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14778 		wm_put_hw_semaphore_82573(sc);
   14779 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14780 		    device_xname(sc->sc_dev));
   14781 		return -1;
   14782 	}
   14783 
   14784 	return 0;
   14785 }
   14786 
   14787 static void
   14788 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14789 {
   14790 	uint32_t reg;
   14791 
   14792 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14793 		device_xname(sc->sc_dev), __func__));
   14794 
   14795 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14796 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14797 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14798 }
   14799 
   14800 /*
   14801  * Management mode and power management related subroutines.
   14802  * BMC, AMT, suspend/resume and EEE.
   14803  */
   14804 
   14805 #ifdef WM_WOL
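          /*
           * Check whether the firmware's management mode is enabled;
           * returns non-zero if so.  The check differs per chip family.
           */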
   14806 static int
   14807 wm_check_mng_mode(struct wm_softc *sc)
   14808 {
   14809 	int rv;
   14810 
   14811 	switch (sc->sc_type) {
   14812 	case WM_T_ICH8:
   14813 	case WM_T_ICH9:
   14814 	case WM_T_ICH10:
   14815 	case WM_T_PCH:
   14816 	case WM_T_PCH2:
   14817 	case WM_T_PCH_LPT:
   14818 	case WM_T_PCH_SPT:
   14819 	case WM_T_PCH_CNP:
   14820 		rv = wm_check_mng_mode_ich8lan(sc);
   14821 		break;
   14822 	case WM_T_82574:
   14823 	case WM_T_82583:
   14824 		rv = wm_check_mng_mode_82574(sc);
   14825 		break;
   14826 	case WM_T_82571:
   14827 	case WM_T_82572:
   14828 	case WM_T_82573:
   14829 	case WM_T_80003:
   14830 		rv = wm_check_mng_mode_generic(sc);
   14831 		break;
   14832 	default:
    14833 		/* Nothing to do */
   14834 		rv = 0;
   14835 		break;
   14836 	}
   14837 
   14838 	return rv;
   14839 }
   14840 
   14841 static int
   14842 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14843 {
   14844 	uint32_t fwsm;
   14845 
   14846 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14847 
   14848 	if (((fwsm & FWSM_FW_VALID) != 0)
   14849 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14850 		return 1;
   14851 
   14852 	return 0;
   14853 }
   14854 
   14855 static int
   14856 wm_check_mng_mode_82574(struct wm_softc *sc)
   14857 {
   14858 	uint16_t data;
   14859 
   14860 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14861 
   14862 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14863 		return 1;
   14864 
   14865 	return 0;
   14866 }
   14867 
   14868 static int
   14869 wm_check_mng_mode_generic(struct wm_softc *sc)
   14870 {
   14871 	uint32_t fwsm;
   14872 
   14873 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14874 
   14875 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14876 		return 1;
   14877 
   14878 	return 0;
   14879 }
   14880 #endif /* WM_WOL */
   14881 
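          /*
           * Return 1 if the firmware requires management frames to pass
           * through to the host interface (manageability pass-through),
           * else 0.
           */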
   14882 static int
   14883 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14884 {
   14885 	uint32_t manc, fwsm, factps;
   14886 
   14887 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14888 		return 0;
   14889 
   14890 	manc = CSR_READ(sc, WMREG_MANC);
   14891 
   14892 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14893 		device_xname(sc->sc_dev), manc));
   14894 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14895 		return 0;
   14896 
   14897 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14898 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14899 		factps = CSR_READ(sc, WMREG_FACTPS);
   14900 		if (((factps & FACTPS_MNGCG) == 0)
   14901 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14902 			return 1;
   14903 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14904 		uint16_t data;
   14905 
   14906 		factps = CSR_READ(sc, WMREG_FACTPS);
   14907 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14908 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14909 			device_xname(sc->sc_dev), factps, data));
   14910 		if (((factps & FACTPS_MNGCG) == 0)
   14911 		    && ((data & NVM_CFG2_MNGM_MASK)
   14912 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14913 			return 1;
   14914 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14915 	    && ((manc & MANC_ASF_EN) == 0))
   14916 		return 1;
   14917 
   14918 	return 0;
   14919 }
   14920 
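          /*
           * Return true if the firmware currently blocks PHY resets
           * (e.g. while the management engine is using the PHY).
           */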
   14921 static bool
   14922 wm_phy_resetisblocked(struct wm_softc *sc)
   14923 {
   14924 	bool blocked = false;
   14925 	uint32_t reg;
   14926 	int i = 0;
   14927 
   14928 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14929 		device_xname(sc->sc_dev), __func__));
   14930 
   14931 	switch (sc->sc_type) {
   14932 	case WM_T_ICH8:
   14933 	case WM_T_ICH9:
   14934 	case WM_T_ICH10:
   14935 	case WM_T_PCH:
   14936 	case WM_T_PCH2:
   14937 	case WM_T_PCH_LPT:
   14938 	case WM_T_PCH_SPT:
   14939 	case WM_T_PCH_CNP:
   14940 		do {
   14941 			reg = CSR_READ(sc, WMREG_FWSM);
   14942 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14943 				blocked = true;
   14944 				delay(10*1000);
   14945 				continue;
   14946 			}
   14947 			blocked = false;
   14948 		} while (blocked && (i++ < 30));
   14949 		return blocked;
   14951 	case WM_T_82571:
   14952 	case WM_T_82572:
   14953 	case WM_T_82573:
   14954 	case WM_T_82574:
   14955 	case WM_T_82583:
   14956 	case WM_T_80003:
   14957 		reg = CSR_READ(sc, WMREG_MANC);
    14958 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   14963 	default:
   14964 		/* No problem */
   14965 		break;
   14966 	}
   14967 
   14968 	return false;
   14969 }
   14970 
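          /*
           * Set the DRV_LOAD bit to tell the firmware that the driver has
           * taken over the device; wm_release_hw_control() is the inverse.
           */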
   14971 static void
   14972 wm_get_hw_control(struct wm_softc *sc)
   14973 {
   14974 	uint32_t reg;
   14975 
   14976 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14977 		device_xname(sc->sc_dev), __func__));
   14978 
   14979 	if (sc->sc_type == WM_T_82573) {
   14980 		reg = CSR_READ(sc, WMREG_SWSM);
   14981 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14982 	} else if (sc->sc_type >= WM_T_82571) {
   14983 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14984 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14985 	}
   14986 }
   14987 
   14988 static void
   14989 wm_release_hw_control(struct wm_softc *sc)
   14990 {
   14991 	uint32_t reg;
   14992 
   14993 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14994 		device_xname(sc->sc_dev), __func__));
   14995 
   14996 	if (sc->sc_type == WM_T_82573) {
   14997 		reg = CSR_READ(sc, WMREG_SWSM);
   14998 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14999 	} else if (sc->sc_type >= WM_T_82571) {
   15000 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15001 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15002 	}
   15003 }
   15004 
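          /*
           * Gate or ungate automatic PHY configuration by the hardware on
           * PCH2 (82579) and newer while software configures the PHY.
           */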
   15005 static void
   15006 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15007 {
   15008 	uint32_t reg;
   15009 
   15010 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15011 		device_xname(sc->sc_dev), __func__));
   15012 
   15013 	if (sc->sc_type < WM_T_PCH2)
   15014 		return;
   15015 
   15016 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15017 
   15018 	if (gate)
   15019 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15020 	else
   15021 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15022 
   15023 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15024 }
   15025 
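          /*
           * PHY initialization workarounds for PCH and newer: disable ULP,
           * force the MAC-PHY interconnect out of SMBus mode if the PHY is
           * inaccessible, and reset the PHY into a known state.
           */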
   15026 static int
   15027 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15028 {
   15029 	uint32_t fwsm, reg;
   15030 	int rv = 0;
   15031 
   15032 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15033 		device_xname(sc->sc_dev), __func__));
   15034 
   15035 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15036 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15037 
   15038 	/* Disable ULP */
   15039 	wm_ulp_disable(sc);
   15040 
   15041 	/* Acquire PHY semaphore */
   15042 	rv = sc->phy.acquire(sc);
   15043 	if (rv != 0) {
   15044 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15045 			device_xname(sc->sc_dev), __func__));
   15046 		return -1;
   15047 	}
   15048 
   15049 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15050 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15051 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15052 	 */
   15053 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15054 	switch (sc->sc_type) {
   15055 	case WM_T_PCH_LPT:
   15056 	case WM_T_PCH_SPT:
   15057 	case WM_T_PCH_CNP:
   15058 		if (wm_phy_is_accessible_pchlan(sc))
   15059 			break;
   15060 
   15061 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15062 		 * forcing MAC to SMBus mode first.
   15063 		 */
   15064 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15065 		reg |= CTRL_EXT_FORCE_SMBUS;
   15066 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15067 #if 0
   15068 		/* XXX Isn't this required??? */
   15069 		CSR_WRITE_FLUSH(sc);
   15070 #endif
   15071 		/* Wait 50 milliseconds for MAC to finish any retries
   15072 		 * that it might be trying to perform from previous
   15073 		 * attempts to acknowledge any phy read requests.
   15074 		 */
   15075 		delay(50 * 1000);
   15076 		/* FALLTHROUGH */
   15077 	case WM_T_PCH2:
   15078 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15079 			break;
   15080 		/* FALLTHROUGH */
   15081 	case WM_T_PCH:
   15082 		if (sc->sc_type == WM_T_PCH)
   15083 			if ((fwsm & FWSM_FW_VALID) != 0)
   15084 				break;
   15085 
   15086 		if (wm_phy_resetisblocked(sc) == true) {
   15087 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15088 			break;
   15089 		}
   15090 
   15091 		/* Toggle LANPHYPC Value bit */
   15092 		wm_toggle_lanphypc_pch_lpt(sc);
   15093 
   15094 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15095 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15096 				break;
   15097 
    15098 			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
    15099 			 * so ensure that the MAC is also out of SMBus mode.
    15100 			 */
   15101 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15102 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15103 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15104 
   15105 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15106 				break;
   15107 			rv = -1;
   15108 		}
   15109 		break;
   15110 	default:
   15111 		break;
   15112 	}
   15113 
   15114 	/* Release semaphore */
   15115 	sc->phy.release(sc);
   15116 
   15117 	if (rv == 0) {
   15118 		/* Check to see if able to reset PHY.  Print error if not */
   15119 		if (wm_phy_resetisblocked(sc)) {
   15120 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15121 			goto out;
   15122 		}
   15123 
    15124 		/* Reset the PHY before any access to it.  Doing so ensures
   15125 		 * that the PHY is in a known good state before we read/write
   15126 		 * PHY registers.  The generic reset is sufficient here,
   15127 		 * because we haven't determined the PHY type yet.
   15128 		 */
   15129 		if (wm_reset_phy(sc) != 0)
   15130 			goto out;
   15131 
    15132 		/* On a successful reset, possibly need to wait for the PHY
    15133 		 * to quiesce to an accessible state before returning control
    15134 		 * to the calling function.  If the PHY does not quiesce, print
    15135 		 * a diagnostic; this is the condition for which e1000e returns
    15136 		 * E1000E_BLK_PHY_RESET.
    15137 		 */
   15138 		if (wm_phy_resetisblocked(sc))
   15139 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15140 	}
   15141 
   15142 out:
   15143 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15144 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15145 		delay(10*1000);
   15146 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15147 	}
   15148 
    15149 	return rv;
   15150 }
   15151 
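          /*
           * Set up manageability: keep ARP handling in the host and let
           * management packets (ports 623/624) be delivered to the host.
           */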
   15152 static void
   15153 wm_init_manageability(struct wm_softc *sc)
   15154 {
   15155 
   15156 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15157 		device_xname(sc->sc_dev), __func__));
   15158 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15159 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15160 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15161 
   15162 		/* Disable hardware interception of ARP */
   15163 		manc &= ~MANC_ARP_EN;
   15164 
   15165 		/* Enable receiving management packets to the host */
   15166 		if (sc->sc_type >= WM_T_82571) {
   15167 			manc |= MANC_EN_MNG2HOST;
   15168 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15169 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15170 		}
   15171 
   15172 		CSR_WRITE(sc, WMREG_MANC, manc);
   15173 	}
   15174 }
   15175 
   15176 static void
   15177 wm_release_manageability(struct wm_softc *sc)
   15178 {
   15179 
   15180 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15181 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15182 
   15183 		manc |= MANC_ARP_EN;
   15184 		if (sc->sc_type >= WM_T_82571)
   15185 			manc &= ~MANC_EN_MNG2HOST;
   15186 
   15187 		CSR_WRITE(sc, WMREG_MANC, manc);
   15188 	}
   15189 }
   15190 
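          /*
           * Record the chip's wakeup-related capabilities (AMT, ARC
           * subsystem, ASF firmware, manageability) in sc_flags.
           */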
   15191 static void
   15192 wm_get_wakeup(struct wm_softc *sc)
   15193 {
   15194 
   15195 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15196 	switch (sc->sc_type) {
   15197 	case WM_T_82573:
   15198 	case WM_T_82583:
   15199 		sc->sc_flags |= WM_F_HAS_AMT;
   15200 		/* FALLTHROUGH */
   15201 	case WM_T_80003:
   15202 	case WM_T_82575:
   15203 	case WM_T_82576:
   15204 	case WM_T_82580:
   15205 	case WM_T_I350:
   15206 	case WM_T_I354:
   15207 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15208 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15209 		/* FALLTHROUGH */
   15210 	case WM_T_82541:
   15211 	case WM_T_82541_2:
   15212 	case WM_T_82547:
   15213 	case WM_T_82547_2:
   15214 	case WM_T_82571:
   15215 	case WM_T_82572:
   15216 	case WM_T_82574:
   15217 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15218 		break;
   15219 	case WM_T_ICH8:
   15220 	case WM_T_ICH9:
   15221 	case WM_T_ICH10:
   15222 	case WM_T_PCH:
   15223 	case WM_T_PCH2:
   15224 	case WM_T_PCH_LPT:
   15225 	case WM_T_PCH_SPT:
   15226 	case WM_T_PCH_CNP:
   15227 		sc->sc_flags |= WM_F_HAS_AMT;
   15228 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15229 		break;
   15230 	default:
   15231 		break;
   15232 	}
   15233 
   15234 	/* 1: HAS_MANAGE */
   15235 	if (wm_enable_mng_pass_thru(sc) != 0)
   15236 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15237 
    15238 	/*
    15239 	 * Note that the WOL flag is set after the EEPROM-related
    15240 	 * initialization is done.
    15241 	 */
   15242 }
   15243 
   15244 /*
   15245  * Unconfigure Ultra Low Power mode.
   15246  * Only for I217 and newer (see below).
   15247  */
   15248 static int
   15249 wm_ulp_disable(struct wm_softc *sc)
   15250 {
   15251 	uint32_t reg;
   15252 	uint16_t phyreg;
   15253 	int i = 0, rv = 0;
   15254 
   15255 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15256 		device_xname(sc->sc_dev), __func__));
   15257 	/* Exclude old devices */
   15258 	if ((sc->sc_type < WM_T_PCH_LPT)
   15259 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15260 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15261 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15262 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15263 		return 0;
   15264 
   15265 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15266 		/* Request ME un-configure ULP mode in the PHY */
   15267 		reg = CSR_READ(sc, WMREG_H2ME);
   15268 		reg &= ~H2ME_ULP;
   15269 		reg |= H2ME_ENFORCE_SETTINGS;
   15270 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15271 
   15272 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15273 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15274 			if (i++ == 30) {
   15275 				device_printf(sc->sc_dev, "%s timed out\n",
   15276 				    __func__);
   15277 				return -1;
   15278 			}
   15279 			delay(10 * 1000);
   15280 		}
   15281 		reg = CSR_READ(sc, WMREG_H2ME);
   15282 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15283 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15284 
   15285 		return 0;
   15286 	}
   15287 
   15288 	/* Acquire semaphore */
   15289 	rv = sc->phy.acquire(sc);
   15290 	if (rv != 0) {
   15291 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15292 			device_xname(sc->sc_dev), __func__));
   15293 		return -1;
   15294 	}
   15295 
   15296 	/* Toggle LANPHYPC */
   15297 	wm_toggle_lanphypc_pch_lpt(sc);
   15298 
   15299 	/* Unforce SMBus mode in PHY */
   15300 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15301 	if (rv != 0) {
   15302 		uint32_t reg2;
   15303 
   15304 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15305 			__func__);
   15306 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15307 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15308 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15309 		delay(50 * 1000);
   15310 
   15311 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15312 		    &phyreg);
   15313 		if (rv != 0)
   15314 			goto release;
   15315 	}
   15316 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15317 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15318 
   15319 	/* Unforce SMBus mode in MAC */
   15320 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15321 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15322 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15323 
   15324 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15325 	if (rv != 0)
   15326 		goto release;
   15327 	phyreg |= HV_PM_CTRL_K1_ENA;
   15328 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15329 
   15330 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15331 		&phyreg);
   15332 	if (rv != 0)
   15333 		goto release;
   15334 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15335 	    | I218_ULP_CONFIG1_STICKY_ULP
   15336 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15337 	    | I218_ULP_CONFIG1_WOL_HOST
   15338 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15339 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15340 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15341 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15342 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15343 	phyreg |= I218_ULP_CONFIG1_START;
   15344 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15345 
   15346 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15347 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15348 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15349 
   15350 release:
   15351 	/* Release semaphore */
   15352 	sc->phy.release(sc);
   15353 	wm_gmii_reset(sc);
   15354 	delay(50 * 1000);
   15355 
   15356 	return rv;
   15357 }
   15358 
   15359 /* WOL in the newer chipset interfaces (pchlan) */
   15360 static int
   15361 wm_enable_phy_wakeup(struct wm_softc *sc)
   15362 {
   15363 	device_t dev = sc->sc_dev;
   15364 	uint32_t mreg, moff;
   15365 	uint16_t wuce, wuc, wufc, preg;
   15366 	int i, rv;
   15367 
   15368 	KASSERT(sc->sc_type >= WM_T_PCH);
   15369 
   15370 	/* Copy MAC RARs to PHY RARs */
   15371 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15372 
   15373 	/* Activate PHY wakeup */
   15374 	rv = sc->phy.acquire(sc);
   15375 	if (rv != 0) {
   15376 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15377 		    __func__);
   15378 		return rv;
   15379 	}
   15380 
   15381 	/*
   15382 	 * Enable access to PHY wakeup registers.
   15383 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15384 	 */
   15385 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15386 	if (rv != 0) {
   15387 		device_printf(dev,
   15388 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15389 		goto release;
   15390 	}
   15391 
   15392 	/* Copy MAC MTA to PHY MTA */
   15393 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15394 		uint16_t lo, hi;
   15395 
   15396 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15397 		lo = (uint16_t)(mreg & 0xffff);
   15398 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15399 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15400 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15401 	}
   15402 
   15403 	/* Configure PHY Rx Control register */
   15404 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15405 	mreg = CSR_READ(sc, WMREG_RCTL);
   15406 	if (mreg & RCTL_UPE)
   15407 		preg |= BM_RCTL_UPE;
   15408 	if (mreg & RCTL_MPE)
   15409 		preg |= BM_RCTL_MPE;
   15410 	preg &= ~(BM_RCTL_MO_MASK);
   15411 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15412 	if (moff != 0)
   15413 		preg |= moff << BM_RCTL_MO_SHIFT;
   15414 	if (mreg & RCTL_BAM)
   15415 		preg |= BM_RCTL_BAM;
   15416 	if (mreg & RCTL_PMCF)
   15417 		preg |= BM_RCTL_PMCF;
   15418 	mreg = CSR_READ(sc, WMREG_CTRL);
   15419 	if (mreg & CTRL_RFCE)
   15420 		preg |= BM_RCTL_RFCE;
   15421 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15422 
   15423 	wuc = WUC_APME | WUC_PME_EN;
   15424 	wufc = WUFC_MAG;
   15425 	/* Enable PHY wakeup in MAC register */
   15426 	CSR_WRITE(sc, WMREG_WUC,
   15427 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15428 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15429 
   15430 	/* Configure and enable PHY wakeup in PHY registers */
   15431 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15432 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15433 
   15434 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15435 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15436 
   15437 release:
   15438 	sc->phy.release(sc);
   15439 
    15440 	return rv;
   15441 }
   15442 
   15443 /* Power down workaround on D3 */
   15444 static void
   15445 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15446 {
   15447 	uint32_t reg;
   15448 	uint16_t phyreg;
   15449 	int i;
   15450 
   15451 	for (i = 0; i < 2; i++) {
   15452 		/* Disable link */
   15453 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15454 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15455 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15456 
   15457 		/*
   15458 		 * Call gig speed drop workaround on Gig disable before
   15459 		 * accessing any PHY registers
   15460 		 */
   15461 		if (sc->sc_type == WM_T_ICH8)
   15462 			wm_gig_downshift_workaround_ich8lan(sc);
   15463 
   15464 		/* Write VR power-down enable */
   15465 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15466 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15467 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15468 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15469 
   15470 		/* Read it back and test */
   15471 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15472 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15473 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15474 			break;
   15475 
   15476 		/* Issue PHY reset and repeat at most one more time */
   15477 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15478 	}
   15479 }
   15480 
   15481 /*
   15482  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15483  *  @sc: pointer to the HW structure
   15484  *
   15485  *  During S0 to Sx transition, it is possible the link remains at gig
   15486  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15487  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15488  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15489  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15490  *  needs to be written.
   15491  *  Parts that support (and are linked to a partner which support) EEE in
   15492  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15493  *  than 10Mbps w/o EEE.
   15494  */
   15495 static void
   15496 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15497 {
   15498 	device_t dev = sc->sc_dev;
   15499 	struct ethercom *ec = &sc->sc_ethercom;
   15500 	uint32_t phy_ctrl;
   15501 	int rv;
   15502 
   15503 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15504 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15505 
   15506 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15507 
   15508 	if (sc->sc_phytype == WMPHY_I217) {
   15509 		uint16_t devid = sc->sc_pcidevid;
   15510 
   15511 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15512 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15513 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15514 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15515 		    (sc->sc_type >= WM_T_PCH_SPT))
   15516 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15517 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15518 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15519 
   15520 		if (sc->phy.acquire(sc) != 0)
   15521 			goto out;
   15522 
   15523 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15524 			uint16_t eee_advert;
   15525 
   15526 			rv = wm_read_emi_reg_locked(dev,
   15527 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15528 			if (rv)
   15529 				goto release;
   15530 
   15531 			/*
   15532 			 * Disable LPLU if both link partners support 100BaseT
   15533 			 * EEE and 100Full is advertised on both ends of the
   15534 			 * link, and enable Auto Enable LPI since there will
   15535 			 * be no driver to enable LPI while in Sx.
   15536 			 */
   15537 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15538 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15539 				uint16_t anar, phy_reg;
   15540 
   15541 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15542 				    &anar);
   15543 				if (anar & ANAR_TX_FD) {
   15544 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15545 					    PHY_CTRL_NOND0A_LPLU);
   15546 
   15547 					/* Set Auto Enable LPI after link up */
   15548 					sc->phy.readreg_locked(dev, 2,
   15549 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15550 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15551 					sc->phy.writereg_locked(dev, 2,
   15552 					    I217_LPI_GPIO_CTRL, phy_reg);
   15553 				}
   15554 			}
   15555 		}
   15556 
   15557 		/*
   15558 		 * For i217 Intel Rapid Start Technology support,
   15559 		 * when the system is going into Sx and no manageability engine
   15560 		 * is present, the driver must configure proxy to reset only on
   15561 		 * power good.	LPI (Low Power Idle) state must also reset only
   15562 		 * on power good, as well as the MTA (Multicast table array).
   15563 		 * The SMBus release must also be disabled on LCD reset.
   15564 		 */
   15565 
   15566 		/*
   15567 		 * Enable MTA to reset for Intel Rapid Start Technology
   15568 		 * Support
   15569 		 */
   15570 
   15571 release:
   15572 		sc->phy.release(sc);
   15573 	}
   15574 out:
   15575 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15576 
   15577 	if (sc->sc_type == WM_T_ICH8)
   15578 		wm_gig_downshift_workaround_ich8lan(sc);
   15579 
   15580 	if (sc->sc_type >= WM_T_PCH) {
   15581 		wm_oem_bits_config_ich8lan(sc, false);
   15582 
   15583 		/* Reset PHY to activate OEM bits on 82577/8 */
   15584 		if (sc->sc_type == WM_T_PCH)
   15585 			wm_reset_phy(sc);
   15586 
   15587 		if (sc->phy.acquire(sc) != 0)
   15588 			return;
   15589 		wm_write_smbus_addr(sc);
   15590 		sc->phy.release(sc);
   15591 	}
   15592 }
   15593 
   15594 /*
   15595  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15596  *  @sc: pointer to the HW structure
   15597  *
   15598  *  During Sx to S0 transitions on non-managed devices or managed devices
   15599  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15600  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15601  *  the PHY.
   15602  *  On i217, setup Intel Rapid Start Technology.
   15603  */
   15604 static int
   15605 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15606 {
   15607 	device_t dev = sc->sc_dev;
   15608 	int rv;
   15609 
   15610 	if (sc->sc_type < WM_T_PCH2)
   15611 		return 0;
   15612 
   15613 	rv = wm_init_phy_workarounds_pchlan(sc);
   15614 	if (rv != 0)
   15615 		return -1;
   15616 
    15617 	/* For i217 Intel Rapid Start Technology support, when the system
    15618 	 * is transitioning from Sx and no manageability engine is present,
    15619 	 * configure SMBus to restore on reset, disable proxy, and enable
    15620 	 * the reset on MTA (Multicast table array).
    15621 	 */
   15622 	if (sc->sc_phytype == WMPHY_I217) {
   15623 		uint16_t phy_reg;
   15624 
   15625 		if (sc->phy.acquire(sc) != 0)
   15626 			return -1;
   15627 
   15628 		/* Clear Auto Enable LPI after link up */
   15629 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15630 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15631 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15632 
   15633 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15634 			/* Restore clear on SMB if no manageability engine
   15635 			 * is present
   15636 			 */
   15637 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15638 			    &phy_reg);
   15639 			if (rv != 0)
   15640 				goto release;
   15641 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15642 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15643 
   15644 			/* Disable Proxy */
   15645 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15646 		}
   15647 		/* Enable reset on MTA */
    15648 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15649 		if (rv != 0)
   15650 			goto release;
   15651 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15652 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15653 
   15654 release:
   15655 		sc->phy.release(sc);
   15656 		return rv;
   15657 	}
   15658 
   15659 	return 0;
   15660 }
   15661 
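          /*
           * Enable wakeup (WOL): advertise the wakeup capability, run the
           * suspend workarounds, program WUC/WUFC (or the PHY wakeup
           * registers on PCH and newer) and set PME_EN in the PCI PMCSR.
           */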
   15662 static void
   15663 wm_enable_wakeup(struct wm_softc *sc)
   15664 {
   15665 	uint32_t reg, pmreg;
   15666 	pcireg_t pmode;
   15667 	int rv = 0;
   15668 
   15669 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15670 		device_xname(sc->sc_dev), __func__));
   15671 
   15672 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15673 	    &pmreg, NULL) == 0)
   15674 		return;
   15675 
   15676 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15677 		goto pme;
   15678 
   15679 	/* Advertise the wakeup capability */
   15680 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15681 	    | CTRL_SWDPIN(3));
   15682 
   15683 	/* Keep the laser running on fiber adapters */
   15684 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15685 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15686 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15687 		reg |= CTRL_EXT_SWDPIN(3);
   15688 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15689 	}
   15690 
   15691 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15692 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15693 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15694 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15695 		wm_suspend_workarounds_ich8lan(sc);
   15696 
   15697 #if 0	/* For the multicast packet */
   15698 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15699 	reg |= WUFC_MC;
   15700 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15701 #endif
   15702 
   15703 	if (sc->sc_type >= WM_T_PCH) {
   15704 		rv = wm_enable_phy_wakeup(sc);
   15705 		if (rv != 0)
   15706 			goto pme;
   15707 	} else {
   15708 		/* Enable wakeup by the MAC */
   15709 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15710 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15711 	}
   15712 
   15713 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15714 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15715 		|| (sc->sc_type == WM_T_PCH2))
   15716 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15717 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15718 
   15719 pme:
   15720 	/* Request PME */
   15721 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15722 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15723 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15724 		/* For WOL */
   15725 		pmode |= PCI_PMCSR_PME_EN;
   15726 	} else {
   15727 		/* Disable WOL */
   15728 		pmode &= ~PCI_PMCSR_PME_EN;
   15729 	}
   15730 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15731 }
   15732 
   15733 /* Disable ASPM L0s and/or L1 for workaround */
   15734 static void
   15735 wm_disable_aspm(struct wm_softc *sc)
   15736 {
   15737 	pcireg_t reg, mask = 0;
    15738 	const char *str = "";
   15739 
    15740 	/*
    15741 	 * Only for PCIe devices which have the PCIe capability in their
    15742 	 * PCI config space.
    15743 	 */
   15744 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15745 		return;
   15746 
   15747 	switch (sc->sc_type) {
   15748 	case WM_T_82571:
   15749 	case WM_T_82572:
   15750 		/*
   15751 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15752 		 * State Power management L1 State (ASPM L1).
   15753 		 */
   15754 		mask = PCIE_LCSR_ASPM_L1;
   15755 		str = "L1 is";
   15756 		break;
   15757 	case WM_T_82573:
   15758 	case WM_T_82574:
   15759 	case WM_T_82583:
   15760 		/*
   15761 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15762 		 *
    15763 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15764 		 * some chipsets.  The documents for the 82574 and 82583 say
    15765 		 * that disabling L0s only on those specific chipsets is
    15766 		 * sufficient, but we follow what the Intel em driver does.
   15767 		 *
   15768 		 * References:
   15769 		 * Errata 8 of the Specification Update of i82573.
   15770 		 * Errata 20 of the Specification Update of i82574.
   15771 		 * Errata 9 of the Specification Update of i82583.
   15772 		 */
   15773 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15774 		str = "L0s and L1 are";
   15775 		break;
   15776 	default:
   15777 		return;
   15778 	}
   15779 
   15780 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15781 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15782 	reg &= ~mask;
   15783 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15784 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15785 
   15786 	/* Print only in wm_attach() */
   15787 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15788 		aprint_verbose_dev(sc->sc_dev,
    15789 		    "ASPM %s disabled to work around the errata.\n", str);
   15790 }
   15791 
   15792 /* LPLU */
   15793 
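          /*
           * Disable D0 Low Power Link Up; the register holding the LPLU
           * bits differs per chip generation.
           */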
   15794 static void
   15795 wm_lplu_d0_disable(struct wm_softc *sc)
   15796 {
   15797 	struct mii_data *mii = &sc->sc_mii;
   15798 	uint32_t reg;
   15799 	uint16_t phyval;
   15800 
   15801 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15802 		device_xname(sc->sc_dev), __func__));
   15803 
   15804 	if (sc->sc_phytype == WMPHY_IFE)
   15805 		return;
   15806 
   15807 	switch (sc->sc_type) {
   15808 	case WM_T_82571:
   15809 	case WM_T_82572:
   15810 	case WM_T_82573:
   15811 	case WM_T_82575:
   15812 	case WM_T_82576:
   15813 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15814 		phyval &= ~PMR_D0_LPLU;
   15815 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15816 		break;
   15817 	case WM_T_82580:
   15818 	case WM_T_I350:
   15819 	case WM_T_I210:
   15820 	case WM_T_I211:
   15821 		reg = CSR_READ(sc, WMREG_PHPM);
   15822 		reg &= ~PHPM_D0A_LPLU;
   15823 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15824 		break;
   15825 	case WM_T_82574:
   15826 	case WM_T_82583:
   15827 	case WM_T_ICH8:
   15828 	case WM_T_ICH9:
   15829 	case WM_T_ICH10:
   15830 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15831 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15832 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15833 		CSR_WRITE_FLUSH(sc);
   15834 		break;
   15835 	case WM_T_PCH:
   15836 	case WM_T_PCH2:
   15837 	case WM_T_PCH_LPT:
   15838 	case WM_T_PCH_SPT:
   15839 	case WM_T_PCH_CNP:
   15840 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15841 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15842 		if (wm_phy_resetisblocked(sc) == false)
   15843 			phyval |= HV_OEM_BITS_ANEGNOW;
   15844 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15845 		break;
   15846 	default:
   15847 		break;
   15848 	}
   15849 }
   15850 
   15851 /* EEE */
   15852 
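          /*
           * Enable or disable Energy Efficient Ethernet on I350-class
           * MACs according to the ETHERCAP_EEE setting.
           */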
   15853 static int
   15854 wm_set_eee_i350(struct wm_softc *sc)
   15855 {
   15856 	struct ethercom *ec = &sc->sc_ethercom;
   15857 	uint32_t ipcnfg, eeer;
   15858 	uint32_t ipcnfg_mask
   15859 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15860 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15861 
   15862 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15863 
   15864 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15865 	eeer = CSR_READ(sc, WMREG_EEER);
   15866 
   15867 	/* Enable or disable per user setting */
   15868 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15869 		ipcnfg |= ipcnfg_mask;
   15870 		eeer |= eeer_mask;
   15871 	} else {
   15872 		ipcnfg &= ~ipcnfg_mask;
   15873 		eeer &= ~eeer_mask;
   15874 	}
   15875 
   15876 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15877 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15878 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15879 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15880 
   15881 	return 0;
   15882 }
   15883 
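          /*
           * Enable or disable EEE on PCH-class (82579/I217) PHYs.  A speed
           * is enabled for EEE only when both link partners advertise it.
           */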
   15884 static int
   15885 wm_set_eee_pchlan(struct wm_softc *sc)
   15886 {
   15887 	device_t dev = sc->sc_dev;
   15888 	struct ethercom *ec = &sc->sc_ethercom;
   15889 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15890 	int rv = 0;
   15891 
   15892 	switch (sc->sc_phytype) {
   15893 	case WMPHY_82579:
   15894 		lpa = I82579_EEE_LP_ABILITY;
   15895 		pcs_status = I82579_EEE_PCS_STATUS;
   15896 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15897 		break;
   15898 	case WMPHY_I217:
   15899 		lpa = I217_EEE_LP_ABILITY;
   15900 		pcs_status = I217_EEE_PCS_STATUS;
   15901 		adv_addr = I217_EEE_ADVERTISEMENT;
   15902 		break;
    15903 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   15904 		return 0;
   15905 	}
   15906 
   15907 	if (sc->phy.acquire(sc)) {
   15908 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15909 		return 0;
   15910 	}
   15911 
   15912 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15913 	if (rv != 0)
   15914 		goto release;
   15915 
   15916 	/* Clear bits that enable EEE in various speeds */
   15917 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15918 
   15919 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15920 		/* Save off link partner's EEE ability */
   15921 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15922 		if (rv != 0)
   15923 			goto release;
   15924 
   15925 		/* Read EEE advertisement */
   15926 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15927 			goto release;
   15928 
   15929 		/*
   15930 		 * Enable EEE only for speeds in which the link partner is
   15931 		 * EEE capable and for which we advertise EEE.
   15932 		 */
   15933 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15934 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15935 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15936 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15937 			if ((data & ANLPAR_TX_FD) != 0)
   15938 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15939 			else {
   15940 				/*
   15941 				 * EEE is not supported in 100Half, so ignore
   15942 				 * partner's EEE in 100 ability if full-duplex
   15943 				 * is not advertised.
   15944 				 */
   15945 				sc->eee_lp_ability
   15946 				    &= ~AN_EEEADVERT_100_TX;
   15947 			}
   15948 		}
   15949 	}
   15950 
   15951 	if (sc->sc_phytype == WMPHY_82579) {
   15952 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15953 		if (rv != 0)
   15954 			goto release;
   15955 
   15956 		data &= ~I82579_LPI_PLL_SHUT_100;
   15957 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15958 	}
   15959 
   15960 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15961 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15962 		goto release;
   15963 
   15964 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15965 release:
   15966 	sc->phy.release(sc);
   15967 
   15968 	return rv;
   15969 }
   15970 
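          /* Dispatch to the EEE setup routine matching this chip. */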
   15971 static int
   15972 wm_set_eee(struct wm_softc *sc)
   15973 {
   15974 	struct ethercom *ec = &sc->sc_ethercom;
   15975 
   15976 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15977 		return 0;
   15978 
   15979 	if (sc->sc_type == WM_T_I354) {
   15980 		/* I354 uses an external PHY */
   15981 		return 0; /* not yet */
   15982 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15983 		return wm_set_eee_i350(sc);
   15984 	else if (sc->sc_type >= WM_T_PCH2)
   15985 		return wm_set_eee_pchlan(sc);
   15986 
   15987 	return 0;
   15988 }
   15989 
   15990 /*
   15991  * Workarounds (mainly PHY related).
   15992  * Basically, PHY's workarounds are in the PHY drivers.
   15993  */
   15994 
   15995 /* Work-around for 82566 Kumeran PCS lock loss */
   15996 static int
   15997 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15998 {
   15999 	struct mii_data *mii = &sc->sc_mii;
   16000 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16001 	int i, reg, rv;
   16002 	uint16_t phyreg;
   16003 
   16004 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16005 		device_xname(sc->sc_dev), __func__));
   16006 
   16007 	/* If the link is not up, do nothing */
   16008 	if ((status & STATUS_LU) == 0)
   16009 		return 0;
   16010 
   16011 	/* Nothing to do if the link is other than 1Gbps */
   16012 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16013 		return 0;
   16014 
   16015 	for (i = 0; i < 10; i++) {
   16016 		/* read twice */
   16017 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16018 		if (rv != 0)
   16019 			return rv;
   16020 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16021 		if (rv != 0)
   16022 			return rv;
   16023 
   16024 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16025 			goto out;	/* GOOD! */
   16026 
   16027 		/* Reset the PHY */
   16028 		wm_reset_phy(sc);
   16029 		delay(5*1000);
   16030 	}
   16031 
   16032 	/* Disable GigE link negotiation */
   16033 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16034 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16035 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16036 
   16037 	/*
   16038 	 * Call gig speed drop workaround on Gig disable before accessing
   16039 	 * any PHY registers.
   16040 	 */
   16041 	wm_gig_downshift_workaround_ich8lan(sc);
   16042 
   16043 out:
   16044 	return 0;
   16045 }
   16046 
   16047 /*
   16048  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16049  *  @sc: pointer to the HW structure
   16050  *
    16051  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16052  *  LPLU, Gig disable, MDIC PHY reset):
   16053  *    1) Set Kumeran Near-end loopback
   16054  *    2) Clear Kumeran Near-end loopback
   16055  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16056  */
   16057 static void
   16058 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16059 {
   16060 	uint16_t kmreg;
   16061 
   16062 	/* Only for igp3 */
   16063 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16064 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16065 			return;
   16066 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16067 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16068 			return;
   16069 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16070 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16071 	}
   16072 }
   16073 
   16074 /*
   16075  * Workaround for pch's PHYs
   16076  * XXX should be moved to new PHY driver?
   16077  */
   16078 static int
   16079 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16080 {
   16081 	device_t dev = sc->sc_dev;
   16082 	struct mii_data *mii = &sc->sc_mii;
   16083 	struct mii_softc *child;
   16084 	uint16_t phy_data, phyrev = 0;
   16085 	int phytype = sc->sc_phytype;
   16086 	int rv;
   16087 
   16088 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16089 		device_xname(dev), __func__));
   16090 	KASSERT(sc->sc_type == WM_T_PCH);
   16091 
   16092 	/* Set MDIO slow mode before any other MDIO access */
   16093 	if (phytype == WMPHY_82577)
   16094 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16095 			return rv;
   16096 
   16097 	child = LIST_FIRST(&mii->mii_phys);
   16098 	if (child != NULL)
   16099 		phyrev = child->mii_mpd_rev;
   16100 
    16101 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16102 	if ((child != NULL) &&
   16103 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16104 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16105 		/* Disable generation of early preamble (0x4431) */
   16106 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16107 		    &phy_data);
   16108 		if (rv != 0)
   16109 			return rv;
   16110 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16111 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16112 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16113 		    phy_data);
   16114 		if (rv != 0)
   16115 			return rv;
   16116 
   16117 		/* Preamble tuning for SSC */
   16118 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16119 		if (rv != 0)
   16120 			return rv;
   16121 	}
   16122 
   16123 	/* 82578 */
   16124 	if (phytype == WMPHY_82578) {
   16125 		/*
   16126 		 * Return registers to default by doing a soft reset then
   16127 		 * writing 0x3140 to the control register
   16128 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16129 		 */
   16130 		if ((child != NULL) && (phyrev < 2)) {
   16131 			PHY_RESET(child);
   16132 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16133 			if (rv != 0)
   16134 				return rv;
   16135 		}
   16136 	}
   16137 
   16138 	/* Select page 0 */
   16139 	if ((rv = sc->phy.acquire(sc)) != 0)
   16140 		return rv;
   16141 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16142 	sc->phy.release(sc);
   16143 	if (rv != 0)
   16144 		return rv;
   16145 
   16146 	/*
   16147 	 * Configure the K1 Si workaround during phy reset assuming there is
   16148 	 * link so that it disables K1 if link is in 1Gbps.
   16149 	 */
	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
		return rv;

	/* Workaround for link disconnects on a busy hub in half duplex */
	rv = sc->phy.acquire(sc);
	if (rv)
		return rv;
	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
	if (rv)
		goto release;
	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
	    phy_data & 0x00ff);
	if (rv)
		goto release;

	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
release:
	sc->phy.release(sc);

	return rv;
}

/*
 *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @sc:   pointer to the HW structure
 */
static void
wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return;

	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);

	sc->phy.release(sc);
}

static void
wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint32_t mac_reg;
	uint16_t i, wuce;
	int count;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
		return;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
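	/*
	 * An explanatory note: each 32-bit RAL/RAH MAC register is split
	 * into 16-bit halves for the PHY-side BM_RAR_{L,M,H,CTRL} wakeup
	 * registers; the CTRL half carries the Address Valid (RAL_AV) bit.
	 */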
	count = wm_rar_count(sc);
	for (i = 0; i < count; i++) {
		uint16_t lo, hi;
		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);

		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
	}

	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
}

/*
 *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
 */
static int
wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
{
	device_t dev = sc->sc_dev;
	int rar_count;
	int rv;
	uint32_t mac_reg;
	uint16_t dft_ctrl, data;
	uint16_t i;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));

	if (sc->sc_type < WM_T_PCH2)
		return 0;

	/* Acquire PHY semaphore */
	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;

	/* Disable Rx path while enabling/disabling workaround */
	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
	if (rv != 0)
		goto out;
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl | (1 << 14));
	if (rv != 0)
		goto out;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		rar_count = wm_rar_count(sc);
		for (i = 0; i < rar_count; i++) {
			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
			uint32_t addr_high, addr_low;

			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
			if (!(addr_high & RAL_AV))
				continue;
			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
		}

		/* Write Rx addresses to the PHY */
		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
	}

	/*
	 * If enable ==
	 *	true: Enable jumbo frame workaround in the MAC.
	 *	false: Write MAC register values back to h/w defaults.
	 */
	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
	if (enable) {
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
	} else
		mac_reg &= ~(0xf << 14);
	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);

	mac_reg = CSR_READ(sc, WMREG_RCTL);
	if (enable) {
		mac_reg |= RCTL_SECRC;
		sc->sc_rctl |= RCTL_SECRC;
		sc->sc_flags |= WM_F_CRC_STRIP;
	} else {
		mac_reg &= ~RCTL_SECRC;
		sc->sc_rctl &= ~RCTL_SECRC;
		sc->sc_flags &= ~WM_F_CRC_STRIP;
	}
	CSR_WRITE(sc, WMREG_RCTL, mac_reg);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 0;
	else
		data &= ~(1 << 0);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
	if (rv != 0)
		goto out;

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
	if (rv != 0)
		goto out;
	/*
	 * XXX FreeBSD and Linux write the same value in both the enable and
	 * the disable case. Is that correct?
	 */
	data &= ~(0xf << 8);
	data |= (0xb << 8);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
	if (rv != 0)
		goto out;

	/*
	 * If enable ==
	 *	true: Enable jumbo frame workaround in the PHY.
	 *	false: Write PHY register values back to h/w defaults.
	 */
	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
	if (rv != 0)
		goto out;
	data &= ~(0x7F << 5);
	if (enable)
		data |= (0x37 << 5);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
	if (rv != 0)
		goto out;
	if (enable)
		data &= ~(1 << 13);
	else
		data |= (1 << 13);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
	if (rv != 0)
		goto out;
	data &= ~(0x3FF << 2);
	if (enable)
		data |= (I82579_TX_PTR_GAP << 2);
	else
		data |= (0x8 << 2);
	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
	if (rv != 0)
		goto out;

	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
	    enable ? 0xf100 : 0x7e00);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 10;
	else
		data &= ~(1 << 10);
	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
	if (rv != 0)
		goto out;

	/* Re-enable Rx path after enabling/disabling workaround */
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl & ~(1 << 14));

out:
	sc->phy.release(sc);

	return rv;
}

/*
 *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 */
static int
wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	/* Set MDIO slow mode before any other MDIO access */
	rv = wm_set_mdio_slow_mode_hv(sc);
	if (rv != 0)
		return rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;
	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
	if (rv != 0)
		goto release;
	/* Drop the link after the MSE threshold has been reached 5 times */
	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
release:
	sc->phy.release(sc);

	return rv;
}

/**
 *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static int
wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
{
	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
	uint32_t status = CSR_READ(sc, WMREG_STATUS);
	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
	uint16_t phyreg;

	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
		if (rv != 0)
			goto release;
		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
		if (rv != 0)
			goto release;
		delay(20);
		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);

		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
release:
		sc->phy.release(sc);
		return rv;
	}

	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;

	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (((child != NULL) && (child->mii_mpd_rev > 5))
	    || !link
	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
		goto update_fextnvm6;

	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);

	/* Clear link status transmit timeout */
	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
	if (speed == STATUS_SPEED_100) {
		/* Set inband Tx timeout to 5x10us for 100Half */
		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Do not extend the K1 entry latency for 100Half */
		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	} else {
		/* Set inband Tx timeout to 50x10us for 10Full/Half */
		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Extend the K1 entry latency for 10 Mbps */
		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	}

	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);

update_fextnvm6:
	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
	return 0;
}

/*
 *  wm_k1_gig_workaround_hv - K1 Si workaround
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
 *  If link is down, the function will restore the default K1 setting located
 *  in the NVM.
 */
static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

/*
 *  wm_k1_workaround_lv - K1 Si workaround
 *  @sc:   pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
 */
static int
wm_k1_workaround_lv(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t phyreg;
	int rv;

	if (sc->sc_type != WM_T_PCH2)
		return 0;

	/* Set K1 beacon duration based on 10Mbps speed */
	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
	if (rv != 0)
		return rv;

	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (phyreg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
			    &phyreg);
			if (rv != 0)
				return rv;
			phyreg &= ~HV_PM_CTRL_K1_ENA;
			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
			    phyreg);
			if (rv != 0)
				return rv;
		} else {
			/* For 10Mbps */
			reg = CSR_READ(sc, WMREG_FEXTNVM4);
			reg &= ~FEXTNVM4_BEACON_DURATION;
			reg |= FEXTNVM4_BEACON_DURATION_16US;
			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
		}
	}

	return 0;
}

/*
 *  wm_link_stall_workaround_hv - Si workaround
 *  @sc: pointer to the HW structure
 *
 *  This function works around a Si bug where the link partner can get
 *  a link up indication before the PHY does. If small packets are sent
 *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall preventing
 *  further packets from being received.  The workaround is to clear the
 *  packet buffer after the PHY detects link up.
 */
static int
wm_link_stall_workaround_hv(struct wm_softc *sc)
{
	uint16_t phyreg;

	if (sc->sc_phytype != WMPHY_82578)
		return 0;

	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
	if ((phyreg & BMCR_LOOP) != 0)
		return 0;

	/* Check if link is up and at 1Gbps */
	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_MASK;
	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
		| BM_CS_STATUS_SPEED_1000))
		return 0;

	delay(200 * 1000);	/* XXX too big */

	/* Flush the packets in the fifo buffer */
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC);

	return 0;
}

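/*
 * Set the KMRN interface to MDIO slow mode.  (A hedged note: judging from
 * the call sites and wm_phy_is_accessible_pchlan(), some PCH PHY revisions
 * only respond reliably to MDIO accesses in slow mode, which is why callers
 * do this before any other MDIO access.)
 */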
static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	int rv;
	uint16_t reg;

	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
	if (rv != 0)
		return rv;

	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

/*
 *  wm_configure_k1_ich8lan - Configure K1 power state
 *  @sc: pointer to the HW structure
 *  @enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

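	/*
	 * An interpretive comment: briefly force the MAC speed with the
	 * speed-detection bypass set, then restore the original values,
	 * presumably so that the MAC/PHY interface re-latches the new K1
	 * configuration.
	 */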
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as the one mentioned in the FreeBSD driver
	 * for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

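/* A PHY ID register reading as all 0s or all 1s means no (valid) response. */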
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
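	/*
	 * Note (an assumption, based on the sequence below): pulsing the
	 * LANPHYPC value pin power-cycles the PHY so that it comes back up
	 * in its default, accessible state.
	 */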
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
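		/*
		 * Worked example (illustrative numbers, not from any
		 * datasheet): with rxa = 24 (KB), an MTU of 1500 and
		 * speed = 1000, lat_ns below becomes
		 * (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000 = 172384 ns.
		 * That exceeds the 10-bit value field, so the loop divides
		 * by 32 (2^5) per scale step: 172384 -> 5387 -> 169,
		 * giving scale = 2 and value = 169, i.e. an encoded
		 * latency of 169 * 2^10 ns, roughly 173 us.
		 */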
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

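		/*
		 * An interpretive comment: this is the inverse of the
		 * computation above - convert the (possibly clamped)
		 * latency back into the amount of Rx buffer (KB) consumed
		 * while the platform is away; whatever remains of rxa
		 * becomes the OBFF high water mark.
		 */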
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that this function is called in both the FLASH and iNVM cases on
 * NetBSD.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

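		/*
		 * Bounce the device through D3hot and back to D0.  Our
		 * reading of the errata (an assumption, not stated verbatim
		 * there) is that this forces the PLL to be re-initialized
		 * with the override value written above.
		 */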
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl function */
#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;

	return 0;
}
#endif