/*	$NetBSD: if_wm.c,v 1.746 2022/07/22 05:23:50 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.746 2022/07/22 05:23:50 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
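
/*
 * Illustrative usage (a sketch, not compiled): the format string and its
 * arguments are passed as one parenthesized list so DPRINTF() can hand
 * them to printf unchanged when the matching sc_debug bit is set, e.g.:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */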

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver may use: one per queue
 * pair, plus one for the link interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

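/*
 * The #ifndef defaults below can presumably be overridden from the kernel
 * configuration file (via the opt_if_wm.h include above), e.g.
 * "options WM_DISABLE_MSI=1".
 */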
#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
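
/*
 * Illustrative sketch (not part of the driver): because the ring sizes
 * above are powers of two, WM_NEXTTX()/WM_NEXTTXS() wrap an index with
 * a cheap AND instead of a modulo.  For a hypothetical 256-entry ring:
 */
#if 0
static inline int
example_nexttx(int x)
{

	/* (255 + 1) & 0xff == 0, so the index wraps back to entry 0. */
	return (x + 1) & (256 - 1);
}
#endif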

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
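
/*
 * Worked example of the sizing above (illustrative, assuming a ~9k
 * maximum jumbo frame): such a frame spans howmany(9018, MCLBYTES) == 5
 * two-kilobyte clusters, and 256 descriptors / 5 clusters per frame is
 * about 51, hence "room for 50 jumbo packets".
 */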

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
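
/*
 * Illustrative expansion (a sketch, not compiled):
 * WM_Q_EVCNT_DEFINE(txq, txdw) token-pastes into
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Note that ## is not processed inside the string literal, so every name
 * buffer has the same fixed size, comfortably large enough for names like
 * "txq00txdw" that WM_Q_EVCNT_ATTACH() later snprintf()s into it before
 * registering the counter with evcnt_attach_dynamic().
 */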

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
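
/*
 * The phy/nvm method vectors above are presumably filled in at attach
 * time to match the chip family, so common code can acquire, release
 * and access the PHY or NVM without switching on sc_type at each call
 * site.
 */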

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs - 1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs - 1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

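/*
 * sc_core_lock may be NULL (e.g. in non-MPSAFE configurations), in
 * which case the macros below degrade to no-ops and WM_CORE_LOCKED()
 * is trivially true.
 */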
#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
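
/*
 * Illustrative sketch (not part of the driver): rxq_tailp always points
 * at the location that should receive the next fragment, so appending
 * is O(1) and never walks the chain:
 */
#if 0
static void
example_rxchain(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{

	WM_RXCHAIN_RESET(rxq);	  /* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m1); /* rxq_head = m1, tailp = &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2); /* m1->m_next = m2, tailp = &m2->m_next */
}
#endif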

#ifdef WM_EVENT_COUNTERS
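/*
 * Where 64-bit atomic loads and stores are available, the relaxed
 * atomics below ensure that lockless readers (e.g. vmstat -e) never
 * observe a torn 64-bit counter value; the read-modify-write itself is
 * still expected to be serialized by the caller (e.g. the queue lock).
 */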
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

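/*
 * CSR_{READ,WRITE}() access the memory-mapped device registers;
 * CSR_WRITE_FLUSH() forces posted writes out to the chip by reading
 * back the STATUS register, a read with no side effects.
 */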
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
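
/*
 * Descriptor ring base addresses are programmed into the hardware as two
 * 32-bit halves; when bus_addr_t is 32 bits wide the high half is simply 0.
 */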

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
   1304 	  WM_T_82571,		WMP_F_COPPER },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1307 	  "Intel PRO/1000 PF (82571EB)",
   1308 	  WM_T_82571,		WMP_F_FIBER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1311 	  "Intel PRO/1000 PB (82571EB)",
   1312 	  WM_T_82571,		WMP_F_SERDES },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1315 	  "Intel PRO/1000 QT (82571EB)",
   1316 	  WM_T_82571,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1319 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1320 	  WM_T_82571,		WMP_F_COPPER },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1323 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1324 	  WM_T_82571,		WMP_F_COPPER },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1327 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1328 	  WM_T_82571,		WMP_F_SERDES },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1331 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1332 	  WM_T_82571,		WMP_F_SERDES },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1335 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1336 	  WM_T_82571,		WMP_F_FIBER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1339 	  "Intel i82572EI 1000baseT Ethernet",
   1340 	  WM_T_82572,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1343 	  "Intel i82572EI 1000baseX Ethernet",
   1344 	  WM_T_82572,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1347 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1348 	  WM_T_82572,		WMP_F_SERDES },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1351 	  "Intel i82572EI 1000baseT Ethernet",
   1352 	  WM_T_82572,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1355 	  "Intel i82573E",
   1356 	  WM_T_82573,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1359 	  "Intel i82573E IAMT",
   1360 	  WM_T_82573,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1363 	  "Intel i82573L Gigabit Ethernet",
   1364 	  WM_T_82573,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1367 	  "Intel i82574L",
   1368 	  WM_T_82574,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1371 	  "Intel i82574L",
   1372 	  WM_T_82574,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1375 	  "Intel i82583V",
   1376 	  WM_T_82583,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1379 	  "i80003 dual 1000baseT Ethernet",
   1380 	  WM_T_80003,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1383 	  "i80003 dual 1000baseX Ethernet",
   1384 	  WM_T_80003,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1387 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1388 	  WM_T_80003,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1391 	  "Intel i80003 1000baseT Ethernet",
   1392 	  WM_T_80003,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1395 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1396 	  WM_T_80003,		WMP_F_SERDES },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1399 	  "Intel i82801H (M_AMT) LAN Controller",
   1400 	  WM_T_ICH8,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1402 	  "Intel i82801H (AMT) LAN Controller",
   1403 	  WM_T_ICH8,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1405 	  "Intel i82801H LAN Controller",
   1406 	  WM_T_ICH8,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1408 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1409 	  WM_T_ICH8,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1411 	  "Intel i82801H (M) LAN Controller",
   1412 	  WM_T_ICH8,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1414 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1415 	  WM_T_ICH8,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1417 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1418 	  WM_T_ICH8,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1420 	  "82567V-3 LAN Controller",
   1421 	  WM_T_ICH8,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1423 	  "82801I (AMT) LAN Controller",
   1424 	  WM_T_ICH9,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1426 	  "82801I 10/100 LAN Controller",
   1427 	  WM_T_ICH9,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1429 	  "82801I (G) 10/100 LAN Controller",
   1430 	  WM_T_ICH9,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1432 	  "82801I (GT) 10/100 LAN Controller",
   1433 	  WM_T_ICH9,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1435 	  "82801I (C) LAN Controller",
   1436 	  WM_T_ICH9,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1438 	  "82801I mobile LAN Controller",
   1439 	  WM_T_ICH9,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1441 	  "82801I mobile (V) LAN Controller",
   1442 	  WM_T_ICH9,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1444 	  "82801I mobile (AMT) LAN Controller",
   1445 	  WM_T_ICH9,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1447 	  "82567LM-4 LAN Controller",
   1448 	  WM_T_ICH9,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1450 	  "82567LM-2 LAN Controller",
   1451 	  WM_T_ICH10,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1453 	  "82567LF-2 LAN Controller",
   1454 	  WM_T_ICH10,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1456 	  "82567LM-3 LAN Controller",
   1457 	  WM_T_ICH10,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1459 	  "82567LF-3 LAN Controller",
   1460 	  WM_T_ICH10,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1462 	  "82567V-2 LAN Controller",
   1463 	  WM_T_ICH10,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1465 	  "82567V-3? LAN Controller",
   1466 	  WM_T_ICH10,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1468 	  "HANKSVILLE LAN Controller",
   1469 	  WM_T_ICH10,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1471 	  "PCH LAN (82577LM) Controller",
   1472 	  WM_T_PCH,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1474 	  "PCH LAN (82577LC) Controller",
   1475 	  WM_T_PCH,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1477 	  "PCH LAN (82578DM) Controller",
   1478 	  WM_T_PCH,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1480 	  "PCH LAN (82578DC) Controller",
   1481 	  WM_T_PCH,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1483 	  "PCH2 LAN (82579LM) Controller",
   1484 	  WM_T_PCH2,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1486 	  "PCH2 LAN (82579V) Controller",
   1487 	  WM_T_PCH2,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1489 	  "82575EB dual-1000baseT Ethernet",
   1490 	  WM_T_82575,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1492 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1493 	  WM_T_82575,		WMP_F_SERDES },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1495 	  "82575GB quad-1000baseT Ethernet",
   1496 	  WM_T_82575,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1498 	  "82575GB quad-1000baseT Ethernet (PM)",
   1499 	  WM_T_82575,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1501 	  "82576 1000BaseT Ethernet",
   1502 	  WM_T_82576,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1504 	  "82576 1000BaseX Ethernet",
   1505 	  WM_T_82576,		WMP_F_FIBER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1508 	  "82576 gigabit Ethernet (SERDES)",
   1509 	  WM_T_82576,		WMP_F_SERDES },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1512 	  "82576 quad-1000BaseT Ethernet",
   1513 	  WM_T_82576,		WMP_F_COPPER },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1516 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1517 	  WM_T_82576,		WMP_F_COPPER },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1520 	  "82576 gigabit Ethernet",
   1521 	  WM_T_82576,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1524 	  "82576 gigabit Ethernet (SERDES)",
   1525 	  WM_T_82576,		WMP_F_SERDES },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1527 	  "82576 quad-gigabit Ethernet (SERDES)",
   1528 	  WM_T_82576,		WMP_F_SERDES },
   1529 
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1531 	  "82580 1000BaseT Ethernet",
   1532 	  WM_T_82580,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1534 	  "82580 1000BaseX Ethernet",
   1535 	  WM_T_82580,		WMP_F_FIBER },
   1536 
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1538 	  "82580 1000BaseT Ethernet (SERDES)",
   1539 	  WM_T_82580,		WMP_F_SERDES },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1542 	  "82580 gigabit Ethernet (SGMII)",
   1543 	  WM_T_82580,		WMP_F_COPPER },
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1545 	  "82580 dual-1000BaseT Ethernet",
   1546 	  WM_T_82580,		WMP_F_COPPER },
   1547 
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1549 	  "82580 quad-1000BaseX Ethernet",
   1550 	  WM_T_82580,		WMP_F_FIBER },
   1551 
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1553 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1554 	  WM_T_82580,		WMP_F_COPPER },
   1555 
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1557 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1558 	  WM_T_82580,		WMP_F_SERDES },
   1559 
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1561 	  "DH89XXCC 1000BASE-KX Ethernet",
   1562 	  WM_T_82580,		WMP_F_SERDES },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1565 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1566 	  WM_T_82580,		WMP_F_SERDES },
   1567 
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1569 	  "I350 Gigabit Network Connection",
   1570 	  WM_T_I350,		WMP_F_COPPER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1573 	  "I350 Gigabit Fiber Network Connection",
   1574 	  WM_T_I350,		WMP_F_FIBER },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1577 	  "I350 Gigabit Backplane Connection",
   1578 	  WM_T_I350,		WMP_F_SERDES },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1581 	  "I350 Quad Port Gigabit Ethernet",
   1582 	  WM_T_I350,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1585 	  "I350 Gigabit Connection",
   1586 	  WM_T_I350,		WMP_F_COPPER },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1589 	  "I354 Gigabit Ethernet (KX)",
   1590 	  WM_T_I354,		WMP_F_SERDES },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1593 	  "I354 Gigabit Ethernet (SGMII)",
   1594 	  WM_T_I354,		WMP_F_COPPER },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1597 	  "I354 Gigabit Ethernet (2.5G)",
   1598 	  WM_T_I354,		WMP_F_COPPER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1601 	  "I210-T1 Ethernet Server Adapter",
   1602 	  WM_T_I210,		WMP_F_COPPER },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1605 	  "I210 Ethernet (Copper OEM)",
   1606 	  WM_T_I210,		WMP_F_COPPER },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1609 	  "I210 Ethernet (Copper IT)",
   1610 	  WM_T_I210,		WMP_F_COPPER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1613 	  "I210 Ethernet (Copper, FLASH less)",
   1614 	  WM_T_I210,		WMP_F_COPPER },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1617 	  "I210 Gigabit Ethernet (Fiber)",
   1618 	  WM_T_I210,		WMP_F_FIBER },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1621 	  "I210 Gigabit Ethernet (SERDES)",
   1622 	  WM_T_I210,		WMP_F_SERDES },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1625 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1626 	  WM_T_I210,		WMP_F_SERDES },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1629 	  "I210 Gigabit Ethernet (SGMII)",
   1630 	  WM_T_I210,		WMP_F_COPPER },
   1631 
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1633 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1634 	  WM_T_I210,		WMP_F_COPPER },
   1635 
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1637 	  "I211 Ethernet (COPPER)",
   1638 	  WM_T_I211,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1640 	  "I217 V Ethernet Connection",
   1641 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1643 	  "I217 LM Ethernet Connection",
   1644 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1646 	  "I218 V Ethernet Connection",
   1647 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1649 	  "I218 V Ethernet Connection",
   1650 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1652 	  "I218 V Ethernet Connection",
   1653 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1655 	  "I218 LM Ethernet Connection",
   1656 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1658 	  "I218 LM Ethernet Connection",
   1659 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1661 	  "I218 LM Ethernet Connection",
   1662 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1664 	  "I219 LM Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1667 	  "I219 LM (2) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1670 	  "I219 LM (3) Ethernet Connection",
   1671 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1673 	  "I219 LM (4) Ethernet Connection",
   1674 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1676 	  "I219 LM (5) Ethernet Connection",
   1677 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1679 	  "I219 LM (6) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1682 	  "I219 LM (7) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1685 	  "I219 LM (8) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1688 	  "I219 LM (9) Ethernet Connection",
   1689 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1691 	  "I219 LM (10) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1694 	  "I219 LM (11) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1697 	  "I219 LM (12) Ethernet Connection",
   1698 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1700 	  "I219 LM (13) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1703 	  "I219 LM (14) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1706 	  "I219 LM (15) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1709 	  "I219 LM (16) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1712 	  "I219 LM (17) Ethernet Connection",
   1713 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1715 	  "I219 LM (18) Ethernet Connection",
   1716 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1718 	  "I219 LM (19) Ethernet Connection",
   1719 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1721 	  "I219 V Ethernet Connection",
   1722 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1723 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1724 	  "I219 V (2) Ethernet Connection",
   1725 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1727 	  "I219 V (4) Ethernet Connection",
   1728 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1730 	  "I219 V (5) Ethernet Connection",
   1731 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1733 	  "I219 V (6) Ethernet Connection",
   1734 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1735 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1736 	  "I219 V (7) Ethernet Connection",
   1737 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1739 	  "I219 V (8) Ethernet Connection",
   1740 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1742 	  "I219 V (9) Ethernet Connection",
   1743 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1745 	  "I219 V (10) Ethernet Connection",
   1746 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1747 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1748 	  "I219 V (11) Ethernet Connection",
   1749 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1751 	  "I219 V (12) Ethernet Connection",
   1752 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1754 	  "I219 V (13) Ethernet Connection",
   1755 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1757 	  "I219 V (14) Ethernet Connection",
   1758 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1759 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1760 	  "I219 V (15) Ethernet Connection",
   1761 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1763 	  "I219 V (16) Ethernet Connection",
   1764 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1766 	  "I219 V (17) Ethernet Connection",
   1767 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1769 	  "I219 V (18) Ethernet Connection",
   1770 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1771 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1772 	  "I219 V (19) Ethernet Connection",
   1773 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1774 	{ 0,			0,
   1775 	  NULL,
   1776 	  0,			0 },
   1777 };
   1778 
   1779 /*
   1780  * Register read/write functions.
   1781  * Other than CSR_{READ|WRITE}().
   1782  */
   1783 
   1784 #if 0 /* Not currently used */
   1785 static inline uint32_t
   1786 wm_io_read(struct wm_softc *sc, int reg)
   1787 {
   1788 
   1789 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1790 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1791 }
   1792 #endif
   1793 
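         /*
          * The I/O BAR is an indirect two-register window (a note based
          * on the i8254x I/O-mapped register access scheme): a write to
          * offset 0 latches the target register offset, and offset 4
          * then reads or writes that register, as wm_io_write() below
          * does.
          */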
   1794 static inline void
   1795 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1796 {
   1797 
   1798 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1799 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1800 }
   1801 
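         /*
          * Write one 8-bit sub-register of an 82575 serializer control
          * register: the data and sub-register offset are packed into a
          * single CSR write, and then the READY bit is polled (5us per
          * iteration, up to SCTL_CTL_POLL_TIMEOUT tries) to confirm that
          * the hardware latched the value.
          */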
   1802 static inline void
   1803 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1804     uint32_t data)
   1805 {
   1806 	uint32_t regval;
   1807 	int i;
   1808 
   1809 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1810 
   1811 	CSR_WRITE(sc, reg, regval);
   1812 
   1813 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1814 		delay(5);
   1815 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1816 			break;
   1817 	}
   1818 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1819 		aprint_error("%s: WARNING:"
   1820 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1821 		    device_xname(sc->sc_dev), reg);
   1822 	}
   1823 }
   1824 
   1825 static inline void
   1826 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1827 {
   1828 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1829 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1830 }
   1831 
   1832 /*
   1833  * Descriptor sync/init functions.
   1834  */
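         /*
          * wm_cdtxsync() syncs "num" descriptors starting at "start" and
          * handles ring wrap-around: on a 256-descriptor ring, for
          * example, start=250 with num=10 issues two bus_dmamap_sync()
          * calls, one covering descriptors 250-255 and another covering
          * descriptors 0-3.
          */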
   1835 static inline void
   1836 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1837 {
   1838 	struct wm_softc *sc = txq->txq_sc;
   1839 
   1840 	/* If it will wrap around, sync to the end of the ring. */
   1841 	if ((start + num) > WM_NTXDESC(txq)) {
   1842 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1843 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1844 		    (WM_NTXDESC(txq) - start), ops);
   1845 		num -= (WM_NTXDESC(txq) - start);
   1846 		start = 0;
   1847 	}
   1848 
   1849 	/* Now sync whatever is left. */
   1850 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1851 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1852 }
   1853 
   1854 static inline void
   1855 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1856 {
   1857 	struct wm_softc *sc = rxq->rxq_sc;
   1858 
   1859 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1860 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1861 }
   1862 
   1863 static inline void
   1864 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1865 {
   1866 	struct wm_softc *sc = rxq->rxq_sc;
   1867 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1868 	struct mbuf *m = rxs->rxs_mbuf;
   1869 
   1870 	/*
   1871 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1872 	 * so that the payload after the Ethernet header is aligned
   1873 	 * to a 4-byte boundary.
    1874 	 *
   1875 	 * XXX BRAINDAMAGE ALERT!
   1876 	 * The stupid chip uses the same size for every buffer, which
   1877 	 * is set in the Receive Control register.  We are using the 2K
   1878 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1879 	 * reason, we can't "scoot" packets longer than the standard
   1880 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1881 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1882 	 * the upper layer copy the headers.
   1883 	 */
   1884 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1885 
   1886 	if (sc->sc_type == WM_T_82574) {
   1887 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1888 		rxd->erx_data.erxd_addr =
   1889 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1890 		rxd->erx_data.erxd_dd = 0;
   1891 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1892 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1893 
   1894 		rxd->nqrx_data.nrxd_paddr =
   1895 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1896 		/* Currently, split header is not supported. */
   1897 		rxd->nqrx_data.nrxd_haddr = 0;
   1898 	} else {
   1899 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1900 
   1901 		wm_set_dma_addr(&rxd->wrx_addr,
   1902 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1903 		rxd->wrx_len = 0;
   1904 		rxd->wrx_cksum = 0;
   1905 		rxd->wrx_status = 0;
   1906 		rxd->wrx_errors = 0;
   1907 		rxd->wrx_special = 0;
   1908 	}
   1909 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1910 
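         	/*
         	 * Advance the receive descriptor tail register; this hands
         	 * the freshly initialized descriptor back to the hardware.
         	 */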
   1911 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1912 }
   1913 
   1914 /*
   1915  * Device driver interface functions and commonly used functions.
   1916  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1917  */
   1918 
   1919 /* Lookup supported device table */
   1920 static const struct wm_product *
   1921 wm_lookup(const struct pci_attach_args *pa)
   1922 {
   1923 	const struct wm_product *wmp;
   1924 
   1925 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1926 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1927 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1928 			return wmp;
   1929 	}
   1930 	return NULL;
   1931 }
   1932 
   1933 /* The match function (ca_match) */
   1934 static int
   1935 wm_match(device_t parent, cfdata_t cf, void *aux)
   1936 {
   1937 	struct pci_attach_args *pa = aux;
   1938 
   1939 	if (wm_lookup(pa) != NULL)
   1940 		return 1;
   1941 
   1942 	return 0;
   1943 }
   1944 
   1945 /* The attach function (ca_attach) */
   1946 static void
   1947 wm_attach(device_t parent, device_t self, void *aux)
   1948 {
   1949 	struct wm_softc *sc = device_private(self);
   1950 	struct pci_attach_args *pa = aux;
   1951 	prop_dictionary_t dict;
   1952 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1953 	pci_chipset_tag_t pc = pa->pa_pc;
   1954 	int counts[PCI_INTR_TYPE_SIZE];
   1955 	pci_intr_type_t max_type;
   1956 	const char *eetype, *xname;
   1957 	bus_space_tag_t memt;
   1958 	bus_space_handle_t memh;
   1959 	bus_size_t memsize;
   1960 	int memh_valid;
   1961 	int i, error;
   1962 	const struct wm_product *wmp;
   1963 	prop_data_t ea;
   1964 	prop_number_t pn;
   1965 	uint8_t enaddr[ETHER_ADDR_LEN];
   1966 	char buf[256];
   1967 	char wqname[MAXCOMLEN];
   1968 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1969 	pcireg_t preg, memtype;
   1970 	uint16_t eeprom_data, apme_mask;
   1971 	bool force_clear_smbi;
   1972 	uint32_t link_mode;
   1973 	uint32_t reg;
   1974 
   1975 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1976 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1977 #endif
   1978 	sc->sc_dev = self;
   1979 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1980 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1981 	sc->sc_core_stopping = false;
   1982 
   1983 	wmp = wm_lookup(pa);
   1984 #ifdef DIAGNOSTIC
   1985 	if (wmp == NULL) {
   1986 		printf("\n");
   1987 		panic("wm_attach: impossible");
   1988 	}
   1989 #endif
   1990 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1991 
   1992 	sc->sc_pc = pa->pa_pc;
   1993 	sc->sc_pcitag = pa->pa_tag;
   1994 
   1995 	if (pci_dma64_available(pa)) {
   1996 		aprint_verbose(", 64-bit DMA");
   1997 		sc->sc_dmat = pa->pa_dmat64;
   1998 	} else {
   1999 		aprint_verbose(", 32-bit DMA");
   2000 		sc->sc_dmat = pa->pa_dmat;
   2001 	}
   2002 
   2003 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    2004 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   2005 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2006 
   2007 	sc->sc_type = wmp->wmp_type;
   2008 
   2009 	/* Set default function pointers */
   2010 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2011 	sc->phy.release = sc->nvm.release = wm_put_null;
   2012 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2013 
   2014 	if (sc->sc_type < WM_T_82543) {
   2015 		if (sc->sc_rev < 2) {
   2016 			aprint_error_dev(sc->sc_dev,
   2017 			    "i82542 must be at least rev. 2\n");
   2018 			return;
   2019 		}
   2020 		if (sc->sc_rev < 3)
   2021 			sc->sc_type = WM_T_82542_2_0;
   2022 	}
   2023 
   2024 	/*
   2025 	 * Disable MSI for Errata:
   2026 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2027 	 *
   2028 	 *  82544: Errata 25
   2029 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2030 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2031 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2032 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2033 	 *
   2034 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2035 	 *
   2036 	 *  82571 & 82572: Errata 63
   2037 	 */
   2038 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2039 	    || (sc->sc_type == WM_T_82572))
   2040 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2041 
   2042 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2043 	    || (sc->sc_type == WM_T_82580)
   2044 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2045 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2046 		sc->sc_flags |= WM_F_NEWQUEUE;
   2047 
   2048 	/* Set device properties (mactype) */
   2049 	dict = device_properties(sc->sc_dev);
   2050 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2051 
   2052 	/*
    2053 	 * Map the device.  All devices support memory-mapped access,
   2054 	 * and it is really required for normal operation.
   2055 	 */
   2056 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2057 	switch (memtype) {
   2058 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2059 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2060 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2061 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2062 		break;
   2063 	default:
   2064 		memh_valid = 0;
   2065 		break;
   2066 	}
   2067 
   2068 	if (memh_valid) {
   2069 		sc->sc_st = memt;
   2070 		sc->sc_sh = memh;
   2071 		sc->sc_ss = memsize;
   2072 	} else {
   2073 		aprint_error_dev(sc->sc_dev,
   2074 		    "unable to map device registers\n");
   2075 		return;
   2076 	}
   2077 
   2078 	/*
   2079 	 * In addition, i82544 and later support I/O mapped indirect
   2080 	 * register access.  It is not desirable (nor supported in
   2081 	 * this driver) to use it for normal operation, though it is
   2082 	 * required to work around bugs in some chip versions.
   2083 	 */
   2084 	switch (sc->sc_type) {
   2085 	case WM_T_82544:
   2086 	case WM_T_82541:
   2087 	case WM_T_82541_2:
   2088 	case WM_T_82547:
   2089 	case WM_T_82547_2:
   2090 		/* First we have to find the I/O BAR. */
   2091 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2092 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2093 			if (memtype == PCI_MAPREG_TYPE_IO)
   2094 				break;
   2095 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2096 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2097 				i += 4;	/* skip high bits, too */
   2098 		}
   2099 		if (i < PCI_MAPREG_END) {
   2100 			/*
    2101 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2102 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    2103 			 * That's not a problem, because those newer chips
    2104 			 * don't have this bug.
    2105 			 *
    2106 			 * The i8254x apparently doesn't respond when the
    2107 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2108 			 * been configured.
   2109 			 */
   2110 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2111 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2112 				aprint_error_dev(sc->sc_dev,
   2113 				    "WARNING: I/O BAR at zero.\n");
   2114 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2115 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2116 			    == 0) {
   2117 				sc->sc_flags |= WM_F_IOH_VALID;
   2118 			} else
   2119 				aprint_error_dev(sc->sc_dev,
   2120 				    "WARNING: unable to map I/O space\n");
   2121 		}
   2122 		break;
   2123 	default:
   2124 		break;
   2125 	}
   2126 
   2127 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2128 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2129 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2130 	if (sc->sc_type < WM_T_82542_2_1)
   2131 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2132 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2133 
   2134 	/* Power up chip */
   2135 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2136 	    && error != EOPNOTSUPP) {
   2137 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2138 		return;
   2139 	}
   2140 
   2141 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2142 	/*
    2143 	 * Don't use MSI-X if we can use only one queue, in order to
    2144 	 * save interrupt resources.
   2145 	 */
   2146 	if (sc->sc_nqueues > 1) {
   2147 		max_type = PCI_INTR_TYPE_MSIX;
   2148 		/*
    2149 		 * The 82583 has an MSI-X capability in its PCI configuration
    2150 		 * space, but it doesn't actually support it; at least, the
    2151 		 * documentation says nothing about MSI-X.
   2152 		 */
   2153 		counts[PCI_INTR_TYPE_MSIX]
   2154 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2155 	} else {
   2156 		max_type = PCI_INTR_TYPE_MSI;
   2157 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2158 	}
   2159 
   2160 	/* Allocation settings */
   2161 	counts[PCI_INTR_TYPE_MSI] = 1;
   2162 	counts[PCI_INTR_TYPE_INTX] = 1;
    2163 	/* Overridden by the disable flags */
   2164 	if (wm_disable_msi != 0) {
   2165 		counts[PCI_INTR_TYPE_MSI] = 0;
   2166 		if (wm_disable_msix != 0) {
   2167 			max_type = PCI_INTR_TYPE_INTX;
   2168 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2169 		}
   2170 	} else if (wm_disable_msix != 0) {
   2171 		max_type = PCI_INTR_TYPE_MSI;
   2172 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2173 	}
   2174 
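         	/*
         	 * Interrupt allocation falls back in steps, MSI-X -> MSI ->
         	 * INTx: if setup of the preferred type fails, release the
         	 * vectors just allocated, lower max_type and retry.
         	 */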
   2175 alloc_retry:
   2176 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2177 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2178 		return;
   2179 	}
   2180 
   2181 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2182 		error = wm_setup_msix(sc);
   2183 		if (error) {
   2184 			pci_intr_release(pc, sc->sc_intrs,
   2185 			    counts[PCI_INTR_TYPE_MSIX]);
   2186 
   2187 			/* Setup for MSI: Disable MSI-X */
   2188 			max_type = PCI_INTR_TYPE_MSI;
   2189 			counts[PCI_INTR_TYPE_MSI] = 1;
   2190 			counts[PCI_INTR_TYPE_INTX] = 1;
   2191 			goto alloc_retry;
   2192 		}
   2193 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2194 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2195 		error = wm_setup_legacy(sc);
   2196 		if (error) {
   2197 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2198 			    counts[PCI_INTR_TYPE_MSI]);
   2199 
   2200 			/* The next try is for INTx: Disable MSI */
   2201 			max_type = PCI_INTR_TYPE_INTX;
   2202 			counts[PCI_INTR_TYPE_INTX] = 1;
   2203 			goto alloc_retry;
   2204 		}
   2205 	} else {
   2206 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2207 		error = wm_setup_legacy(sc);
   2208 		if (error) {
   2209 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2210 			    counts[PCI_INTR_TYPE_INTX]);
   2211 			return;
   2212 		}
   2213 	}
   2214 
   2215 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2216 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2217 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2218 	    WM_WORKQUEUE_FLAGS);
   2219 	if (error) {
   2220 		aprint_error_dev(sc->sc_dev,
   2221 		    "unable to create workqueue\n");
   2222 		goto out;
   2223 	}
   2224 
   2225 	/*
   2226 	 * Check the function ID (unit number of the chip).
   2227 	 */
   2228 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2229 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2230 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2231 	    || (sc->sc_type == WM_T_82580)
   2232 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2233 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2234 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2235 	else
   2236 		sc->sc_funcid = 0;
   2237 
   2238 	/*
   2239 	 * Determine a few things about the bus we're connected to.
   2240 	 */
   2241 	if (sc->sc_type < WM_T_82543) {
   2242 		/* We don't really know the bus characteristics here. */
   2243 		sc->sc_bus_speed = 33;
   2244 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2245 		/*
    2246 		 * CSA (Communication Streaming Architecture) is about as fast
    2247 		 * as a 32-bit 66MHz PCI bus.
   2248 		 */
   2249 		sc->sc_flags |= WM_F_CSA;
   2250 		sc->sc_bus_speed = 66;
   2251 		aprint_verbose_dev(sc->sc_dev,
   2252 		    "Communication Streaming Architecture\n");
   2253 		if (sc->sc_type == WM_T_82547) {
   2254 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2255 			callout_setfunc(&sc->sc_txfifo_ch,
   2256 			    wm_82547_txfifo_stall, sc);
   2257 			aprint_verbose_dev(sc->sc_dev,
   2258 			    "using 82547 Tx FIFO stall work-around\n");
   2259 		}
   2260 	} else if (sc->sc_type >= WM_T_82571) {
   2261 		sc->sc_flags |= WM_F_PCIE;
   2262 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2263 		    && (sc->sc_type != WM_T_ICH10)
   2264 		    && (sc->sc_type != WM_T_PCH)
   2265 		    && (sc->sc_type != WM_T_PCH2)
   2266 		    && (sc->sc_type != WM_T_PCH_LPT)
   2267 		    && (sc->sc_type != WM_T_PCH_SPT)
   2268 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2269 			/* ICH* and PCH* have no PCIe capability registers */
   2270 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2271 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2272 				NULL) == 0)
   2273 				aprint_error_dev(sc->sc_dev,
   2274 				    "unable to find PCIe capability\n");
   2275 		}
   2276 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2277 	} else {
   2278 		reg = CSR_READ(sc, WMREG_STATUS);
   2279 		if (reg & STATUS_BUS64)
   2280 			sc->sc_flags |= WM_F_BUS64;
   2281 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2282 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2283 
   2284 			sc->sc_flags |= WM_F_PCIX;
   2285 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2286 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2287 				aprint_error_dev(sc->sc_dev,
   2288 				    "unable to find PCIX capability\n");
   2289 			else if (sc->sc_type != WM_T_82545_3 &&
   2290 				 sc->sc_type != WM_T_82546_3) {
   2291 				/*
   2292 				 * Work around a problem caused by the BIOS
   2293 				 * setting the max memory read byte count
   2294 				 * incorrectly.
   2295 				 */
   2296 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2297 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2298 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2299 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2300 
   2301 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2302 				    PCIX_CMD_BYTECNT_SHIFT;
   2303 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2304 				    PCIX_STATUS_MAXB_SHIFT;
   2305 				if (bytecnt > maxb) {
   2306 					aprint_verbose_dev(sc->sc_dev,
   2307 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2308 					    512 << bytecnt, 512 << maxb);
   2309 					pcix_cmd = (pcix_cmd &
   2310 					    ~PCIX_CMD_BYTECNT_MASK) |
   2311 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2312 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2313 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2314 					    pcix_cmd);
   2315 				}
   2316 			}
   2317 		}
   2318 		/*
   2319 		 * The quad port adapter is special; it has a PCIX-PCIX
   2320 		 * bridge on the board, and can run the secondary bus at
   2321 		 * a higher speed.
   2322 		 */
   2323 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2324 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2325 								      : 66;
   2326 		} else if (sc->sc_flags & WM_F_PCIX) {
   2327 			switch (reg & STATUS_PCIXSPD_MASK) {
   2328 			case STATUS_PCIXSPD_50_66:
   2329 				sc->sc_bus_speed = 66;
   2330 				break;
   2331 			case STATUS_PCIXSPD_66_100:
   2332 				sc->sc_bus_speed = 100;
   2333 				break;
   2334 			case STATUS_PCIXSPD_100_133:
   2335 				sc->sc_bus_speed = 133;
   2336 				break;
   2337 			default:
   2338 				aprint_error_dev(sc->sc_dev,
   2339 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2340 				    reg & STATUS_PCIXSPD_MASK);
   2341 				sc->sc_bus_speed = 66;
   2342 				break;
   2343 			}
   2344 		} else
   2345 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2346 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2347 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2348 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2349 	}
   2350 
    2351 	/* Clear interesting stat counters */
   2352 	CSR_READ(sc, WMREG_COLC);
   2353 	CSR_READ(sc, WMREG_RXERRC);
   2354 
   2355 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2356 	    || (sc->sc_type >= WM_T_ICH8))
   2357 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2358 	if (sc->sc_type >= WM_T_ICH8)
   2359 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2360 
   2361 	/* Set PHY, NVM mutex related stuff */
   2362 	switch (sc->sc_type) {
   2363 	case WM_T_82542_2_0:
   2364 	case WM_T_82542_2_1:
   2365 	case WM_T_82543:
   2366 	case WM_T_82544:
   2367 		/* Microwire */
   2368 		sc->nvm.read = wm_nvm_read_uwire;
   2369 		sc->sc_nvm_wordsize = 64;
   2370 		sc->sc_nvm_addrbits = 6;
   2371 		break;
   2372 	case WM_T_82540:
   2373 	case WM_T_82545:
   2374 	case WM_T_82545_3:
   2375 	case WM_T_82546:
   2376 	case WM_T_82546_3:
   2377 		/* Microwire */
   2378 		sc->nvm.read = wm_nvm_read_uwire;
   2379 		reg = CSR_READ(sc, WMREG_EECD);
   2380 		if (reg & EECD_EE_SIZE) {
   2381 			sc->sc_nvm_wordsize = 256;
   2382 			sc->sc_nvm_addrbits = 8;
   2383 		} else {
   2384 			sc->sc_nvm_wordsize = 64;
   2385 			sc->sc_nvm_addrbits = 6;
   2386 		}
   2387 		sc->sc_flags |= WM_F_LOCK_EECD;
   2388 		sc->nvm.acquire = wm_get_eecd;
   2389 		sc->nvm.release = wm_put_eecd;
   2390 		break;
   2391 	case WM_T_82541:
   2392 	case WM_T_82541_2:
   2393 	case WM_T_82547:
   2394 	case WM_T_82547_2:
   2395 		reg = CSR_READ(sc, WMREG_EECD);
   2396 		/*
    2397 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM only
    2398 		 * on the 8254[17], so set the flags and functions before calling it.
   2399 		 */
   2400 		sc->sc_flags |= WM_F_LOCK_EECD;
   2401 		sc->nvm.acquire = wm_get_eecd;
   2402 		sc->nvm.release = wm_put_eecd;
   2403 		if (reg & EECD_EE_TYPE) {
   2404 			/* SPI */
   2405 			sc->nvm.read = wm_nvm_read_spi;
   2406 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2407 			wm_nvm_set_addrbits_size_eecd(sc);
   2408 		} else {
   2409 			/* Microwire */
   2410 			sc->nvm.read = wm_nvm_read_uwire;
   2411 			if ((reg & EECD_EE_ABITS) != 0) {
   2412 				sc->sc_nvm_wordsize = 256;
   2413 				sc->sc_nvm_addrbits = 8;
   2414 			} else {
   2415 				sc->sc_nvm_wordsize = 64;
   2416 				sc->sc_nvm_addrbits = 6;
   2417 			}
   2418 		}
   2419 		break;
   2420 	case WM_T_82571:
   2421 	case WM_T_82572:
   2422 		/* SPI */
   2423 		sc->nvm.read = wm_nvm_read_eerd;
    2424 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2425 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2426 		wm_nvm_set_addrbits_size_eecd(sc);
   2427 		sc->phy.acquire = wm_get_swsm_semaphore;
   2428 		sc->phy.release = wm_put_swsm_semaphore;
   2429 		sc->nvm.acquire = wm_get_nvm_82571;
   2430 		sc->nvm.release = wm_put_nvm_82571;
   2431 		break;
   2432 	case WM_T_82573:
   2433 	case WM_T_82574:
   2434 	case WM_T_82583:
   2435 		sc->nvm.read = wm_nvm_read_eerd;
    2436 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2437 		if (sc->sc_type == WM_T_82573) {
   2438 			sc->phy.acquire = wm_get_swsm_semaphore;
   2439 			sc->phy.release = wm_put_swsm_semaphore;
   2440 			sc->nvm.acquire = wm_get_nvm_82571;
   2441 			sc->nvm.release = wm_put_nvm_82571;
   2442 		} else {
   2443 			/* Both PHY and NVM use the same semaphore. */
   2444 			sc->phy.acquire = sc->nvm.acquire
   2445 			    = wm_get_swfwhw_semaphore;
   2446 			sc->phy.release = sc->nvm.release
   2447 			    = wm_put_swfwhw_semaphore;
   2448 		}
   2449 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2450 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2451 			sc->sc_nvm_wordsize = 2048;
   2452 		} else {
   2453 			/* SPI */
   2454 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2455 			wm_nvm_set_addrbits_size_eecd(sc);
   2456 		}
   2457 		break;
   2458 	case WM_T_82575:
   2459 	case WM_T_82576:
   2460 	case WM_T_82580:
   2461 	case WM_T_I350:
   2462 	case WM_T_I354:
   2463 	case WM_T_80003:
   2464 		/* SPI */
   2465 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2466 		wm_nvm_set_addrbits_size_eecd(sc);
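         		/*
         		 * Large (>= 32K word) parts are read by direct SPI
         		 * access and smaller ones through the EERD register
         		 * interface, presumably because EERD's address field
         		 * cannot reach the larger devices.
         		 */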
   2467 		if ((sc->sc_type == WM_T_80003)
   2468 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2469 			sc->nvm.read = wm_nvm_read_eerd;
   2470 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2471 		} else {
   2472 			sc->nvm.read = wm_nvm_read_spi;
   2473 			sc->sc_flags |= WM_F_LOCK_EECD;
   2474 		}
   2475 		sc->phy.acquire = wm_get_phy_82575;
   2476 		sc->phy.release = wm_put_phy_82575;
   2477 		sc->nvm.acquire = wm_get_nvm_80003;
   2478 		sc->nvm.release = wm_put_nvm_80003;
   2479 		break;
   2480 	case WM_T_ICH8:
   2481 	case WM_T_ICH9:
   2482 	case WM_T_ICH10:
   2483 	case WM_T_PCH:
   2484 	case WM_T_PCH2:
   2485 	case WM_T_PCH_LPT:
   2486 		sc->nvm.read = wm_nvm_read_ich8;
   2487 		/* FLASH */
   2488 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2489 		sc->sc_nvm_wordsize = 2048;
    2490 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2491 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2492 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2493 			aprint_error_dev(sc->sc_dev,
   2494 			    "can't map FLASH registers\n");
   2495 			goto out;
   2496 		}
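         		/*
         		 * Derive the flash geometry from GFPREG: per the
         		 * ICH/PCH flash layout, the low bits hold the base and
         		 * bits 28:16 the limit of the NVM region, in
         		 * flash-sector units.  The bank size is converted from
         		 * sectors to bytes, then to 16-bit words split across
         		 * the two banks.
         		 */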
   2497 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2498 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2499 		    ICH_FLASH_SECTOR_SIZE;
   2500 		sc->sc_ich8_flash_bank_size =
   2501 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2502 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2503 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2504 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2505 		sc->sc_flashreg_offset = 0;
   2506 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2507 		sc->phy.release = wm_put_swflag_ich8lan;
   2508 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2509 		sc->nvm.release = wm_put_nvm_ich8lan;
   2510 		break;
   2511 	case WM_T_PCH_SPT:
   2512 	case WM_T_PCH_CNP:
   2513 		sc->nvm.read = wm_nvm_read_spt;
   2514 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2515 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2516 		sc->sc_flasht = sc->sc_st;
   2517 		sc->sc_flashh = sc->sc_sh;
   2518 		sc->sc_ich8_flash_base = 0;
   2519 		sc->sc_nvm_wordsize =
   2520 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2521 		    * NVM_SIZE_MULTIPLIER;
    2522 		/* It is the size in bytes; we want words */
   2523 		sc->sc_nvm_wordsize /= 2;
   2524 		/* Assume 2 banks */
   2525 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2526 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2527 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2528 		sc->phy.release = wm_put_swflag_ich8lan;
   2529 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2530 		sc->nvm.release = wm_put_nvm_ich8lan;
   2531 		break;
   2532 	case WM_T_I210:
   2533 	case WM_T_I211:
    2534 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2535 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2536 		if (wm_nvm_flash_presence_i210(sc)) {
   2537 			sc->nvm.read = wm_nvm_read_eerd;
   2538 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2539 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2540 			wm_nvm_set_addrbits_size_eecd(sc);
   2541 		} else {
   2542 			sc->nvm.read = wm_nvm_read_invm;
   2543 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2544 			sc->sc_nvm_wordsize = INVM_SIZE;
   2545 		}
   2546 		sc->phy.acquire = wm_get_phy_82575;
   2547 		sc->phy.release = wm_put_phy_82575;
   2548 		sc->nvm.acquire = wm_get_nvm_80003;
   2549 		sc->nvm.release = wm_put_nvm_80003;
   2550 		break;
   2551 	default:
   2552 		break;
   2553 	}
   2554 
   2555 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
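         	/*
         	 * SWSM_SMBI is the software semaphore bit: if firmware (e.g.
         	 * an old boot agent) left it set, later semaphore acquisition
         	 * would time out, so it is forcibly cleared here on the
         	 * affected chips.
         	 */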
   2556 	switch (sc->sc_type) {
   2557 	case WM_T_82571:
   2558 	case WM_T_82572:
   2559 		reg = CSR_READ(sc, WMREG_SWSM2);
   2560 		if ((reg & SWSM2_LOCK) == 0) {
   2561 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2562 			force_clear_smbi = true;
   2563 		} else
   2564 			force_clear_smbi = false;
   2565 		break;
   2566 	case WM_T_82573:
   2567 	case WM_T_82574:
   2568 	case WM_T_82583:
   2569 		force_clear_smbi = true;
   2570 		break;
   2571 	default:
   2572 		force_clear_smbi = false;
   2573 		break;
   2574 	}
   2575 	if (force_clear_smbi) {
   2576 		reg = CSR_READ(sc, WMREG_SWSM);
   2577 		if ((reg & SWSM_SMBI) != 0)
   2578 			aprint_error_dev(sc->sc_dev,
   2579 			    "Please update the Bootagent\n");
   2580 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2581 	}
   2582 
   2583 	/*
    2584 	 * Defer printing the EEPROM type until after verifying the checksum.
   2585 	 * This allows the EEPROM type to be printed correctly in the case
   2586 	 * that no EEPROM is attached.
   2587 	 */
   2588 	/*
   2589 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2590 	 * this for later, so we can fail future reads from the EEPROM.
   2591 	 */
   2592 	if (wm_nvm_validate_checksum(sc)) {
   2593 		/*
    2594 		 * Try the read a second time, because some PCI-e parts fail
    2595 		 * the first check due to the link being in a sleep state.
   2596 		 */
   2597 		if (wm_nvm_validate_checksum(sc))
   2598 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2599 	}
   2600 
   2601 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2602 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2603 	else {
   2604 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2605 		    sc->sc_nvm_wordsize);
   2606 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2607 			aprint_verbose("iNVM");
   2608 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2609 			aprint_verbose("FLASH(HW)");
   2610 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2611 			aprint_verbose("FLASH");
   2612 		else {
   2613 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2614 				eetype = "SPI";
   2615 			else
   2616 				eetype = "MicroWire";
   2617 			aprint_verbose("(%d address bits) %s EEPROM",
   2618 			    sc->sc_nvm_addrbits, eetype);
   2619 		}
   2620 	}
   2621 	wm_nvm_version(sc);
   2622 	aprint_verbose("\n");
   2623 
   2624 	/*
    2625 	 * XXX This is the first call to wm_gmii_setup_phytype(); the
    2626 	 * result might be incorrect.
   2627 	 */
   2628 	wm_gmii_setup_phytype(sc, 0, 0);
   2629 
   2630 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2631 	switch (sc->sc_type) {
   2632 	case WM_T_ICH8:
   2633 	case WM_T_ICH9:
   2634 	case WM_T_ICH10:
   2635 	case WM_T_PCH:
   2636 	case WM_T_PCH2:
   2637 	case WM_T_PCH_LPT:
   2638 	case WM_T_PCH_SPT:
   2639 	case WM_T_PCH_CNP:
   2640 		apme_mask = WUC_APME;
   2641 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2642 		if ((eeprom_data & apme_mask) != 0)
   2643 			sc->sc_flags |= WM_F_WOL;
   2644 		break;
   2645 	default:
   2646 		break;
   2647 	}
   2648 
   2649 	/* Reset the chip to a known state. */
   2650 	wm_reset(sc);
   2651 
   2652 	/*
   2653 	 * Check for I21[01] PLL workaround.
   2654 	 *
   2655 	 * Three cases:
   2656 	 * a) Chip is I211.
   2657 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2658 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2659 	 */
   2660 	if (sc->sc_type == WM_T_I211)
   2661 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2662 	if (sc->sc_type == WM_T_I210) {
   2663 		if (!wm_nvm_flash_presence_i210(sc))
   2664 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2665 		else if ((sc->sc_nvm_ver_major < 3)
   2666 		    || ((sc->sc_nvm_ver_major == 3)
   2667 			&& (sc->sc_nvm_ver_minor < 25))) {
   2668 			aprint_verbose_dev(sc->sc_dev,
   2669 			    "ROM image version %d.%d is older than 3.25\n",
   2670 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2671 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2672 		}
   2673 	}
   2674 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2675 		wm_pll_workaround_i210(sc);
   2676 
   2677 	wm_get_wakeup(sc);
   2678 
   2679 	/* Non-AMT based hardware can now take control from firmware */
   2680 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2681 		wm_get_hw_control(sc);
   2682 
   2683 	/*
	 * Read the Ethernet address from the EEPROM if it was not found
	 * first in the device properties.
   2686 	 */
   2687 	ea = prop_dictionary_get(dict, "mac-address");
   2688 	if (ea != NULL) {
   2689 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2690 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2691 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2692 	} else {
   2693 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2694 			aprint_error_dev(sc->sc_dev,
   2695 			    "unable to read Ethernet address\n");
   2696 			goto out;
   2697 		}
   2698 	}
   2699 
   2700 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2701 	    ether_sprintf(enaddr));
   2702 
   2703 	/*
   2704 	 * Read the config info from the EEPROM, and set up various
   2705 	 * bits in the control registers based on their contents.
   2706 	 */
   2707 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2708 	if (pn != NULL) {
   2709 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2710 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2711 	} else {
   2712 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2713 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2714 			goto out;
   2715 		}
   2716 	}
   2717 
   2718 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2719 	if (pn != NULL) {
   2720 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2721 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2722 	} else {
   2723 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2724 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2725 			goto out;
   2726 		}
   2727 	}
   2728 
   2729 	/* check for WM_F_WOL */
   2730 	switch (sc->sc_type) {
   2731 	case WM_T_82542_2_0:
   2732 	case WM_T_82542_2_1:
   2733 	case WM_T_82543:
   2734 		/* dummy? */
   2735 		eeprom_data = 0;
   2736 		apme_mask = NVM_CFG3_APME;
   2737 		break;
   2738 	case WM_T_82544:
   2739 		apme_mask = NVM_CFG2_82544_APM_EN;
   2740 		eeprom_data = cfg2;
   2741 		break;
   2742 	case WM_T_82546:
   2743 	case WM_T_82546_3:
   2744 	case WM_T_82571:
   2745 	case WM_T_82572:
   2746 	case WM_T_82573:
   2747 	case WM_T_82574:
   2748 	case WM_T_82583:
   2749 	case WM_T_80003:
   2750 	case WM_T_82575:
   2751 	case WM_T_82576:
   2752 		apme_mask = NVM_CFG3_APME;
   2753 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2754 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2755 		break;
   2756 	case WM_T_82580:
   2757 	case WM_T_I350:
   2758 	case WM_T_I354:
   2759 	case WM_T_I210:
   2760 	case WM_T_I211:
   2761 		apme_mask = NVM_CFG3_APME;
   2762 		wm_nvm_read(sc,
   2763 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2764 		    1, &eeprom_data);
   2765 		break;
   2766 	case WM_T_ICH8:
   2767 	case WM_T_ICH9:
   2768 	case WM_T_ICH10:
   2769 	case WM_T_PCH:
   2770 	case WM_T_PCH2:
   2771 	case WM_T_PCH_LPT:
   2772 	case WM_T_PCH_SPT:
   2773 	case WM_T_PCH_CNP:
		/* Already checked before wm_reset() */
   2775 		apme_mask = eeprom_data = 0;
   2776 		break;
   2777 	default: /* XXX 82540 */
   2778 		apme_mask = NVM_CFG3_APME;
   2779 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2780 		break;
   2781 	}
	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2783 	if ((eeprom_data & apme_mask) != 0)
   2784 		sc->sc_flags |= WM_F_WOL;
   2785 
   2786 	/*
	 * We have the EEPROM settings; now apply the special cases where
	 * the EEPROM may be wrong or the board won't support wake-on-LAN
	 * on a particular port.
   2790 	 */
   2791 	switch (sc->sc_pcidevid) {
   2792 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2793 		sc->sc_flags &= ~WM_F_WOL;
   2794 		break;
   2795 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2796 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2797 		/* Wake events only supported on port A for dual fiber
   2798 		 * regardless of eeprom setting */
   2799 		if (sc->sc_funcid == 1)
   2800 			sc->sc_flags &= ~WM_F_WOL;
   2801 		break;
   2802 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2803 		/* If quad port adapter, disable WoL on all but port A */
   2804 		if (sc->sc_funcid != 0)
   2805 			sc->sc_flags &= ~WM_F_WOL;
   2806 		break;
   2807 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2808 		/* Wake events only supported on port A for dual fiber
   2809 		 * regardless of eeprom setting */
   2810 		if (sc->sc_funcid == 1)
   2811 			sc->sc_flags &= ~WM_F_WOL;
   2812 		break;
   2813 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2814 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2815 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2816 		/* If quad port adapter, disable WoL on all but port A */
   2817 		if (sc->sc_funcid != 0)
   2818 			sc->sc_flags &= ~WM_F_WOL;
   2819 		break;
   2820 	}
   2821 
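	/*
	 * The NVM COMPAT word on >= 82575 chips carries extra setup bits:
	 * a SerDes force-mode flag, which disables PCS autonegotiation,
	 * and (on 82575/I350) per-function Media Auto Sense enables.
	 */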
   2822 	if (sc->sc_type >= WM_T_82575) {
   2823 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2824 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2825 			    nvmword);
   2826 			if ((sc->sc_type == WM_T_82575) ||
   2827 			    (sc->sc_type == WM_T_82576)) {
   2828 				/* Check NVM for autonegotiation */
   2829 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2830 				    != 0)
   2831 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2832 			}
   2833 			if ((sc->sc_type == WM_T_82575) ||
   2834 			    (sc->sc_type == WM_T_I350)) {
   2835 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2836 					sc->sc_flags |= WM_F_MAS;
   2837 			}
   2838 		}
   2839 	}
   2840 
   2841 	/*
	 * XXX need special handling for some multi-port cards
	 * to disable a particular port.
   2844 	 */
   2845 
   2846 	if (sc->sc_type >= WM_T_82544) {
   2847 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2848 		if (pn != NULL) {
   2849 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2850 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2851 		} else {
   2852 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2853 				aprint_error_dev(sc->sc_dev,
   2854 				    "unable to read SWDPIN\n");
   2855 				goto out;
   2856 			}
   2857 		}
   2858 	}
   2859 
   2860 	if (cfg1 & NVM_CFG1_ILOS)
   2861 		sc->sc_ctrl |= CTRL_ILOS;
   2862 
   2863 	/*
   2864 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located at
	 * different positions on newer chips. Check all the datasheets.
	 *
	 * Until this problem is resolved, restrict this to chips no newer
	 * than the 82580.
   2869 	 */
   2870 	if (sc->sc_type <= WM_T_82580) {
   2871 		if (sc->sc_type >= WM_T_82544) {
   2872 			sc->sc_ctrl |=
   2873 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2874 			    CTRL_SWDPIO_SHIFT;
   2875 			sc->sc_ctrl |=
   2876 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2877 			    CTRL_SWDPINS_SHIFT;
   2878 		} else {
   2879 			sc->sc_ctrl |=
   2880 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2881 			    CTRL_SWDPIO_SHIFT;
   2882 		}
   2883 	}
   2884 
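	/*
	 * On the 82580 and newer, the ILOS (invert loss-of-signal) bit is
	 * taken from the per-function CFG3 word in the NVM.
	 */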
   2885 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2886 		wm_nvm_read(sc,
   2887 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2888 		    1, &nvmword);
   2889 		if (nvmword & NVM_CFG3_ILOS)
   2890 			sc->sc_ctrl |= CTRL_ILOS;
   2891 	}
   2892 
   2893 #if 0
   2894 	if (sc->sc_type >= WM_T_82544) {
   2895 		if (cfg1 & NVM_CFG1_IPS0)
   2896 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2897 		if (cfg1 & NVM_CFG1_IPS1)
   2898 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2899 		sc->sc_ctrl_ext |=
   2900 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2901 		    CTRL_EXT_SWDPIO_SHIFT;
   2902 		sc->sc_ctrl_ext |=
   2903 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2904 		    CTRL_EXT_SWDPINS_SHIFT;
   2905 	} else {
   2906 		sc->sc_ctrl_ext |=
   2907 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2908 		    CTRL_EXT_SWDPIO_SHIFT;
   2909 	}
   2910 #endif
   2911 
   2912 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2913 #if 0
   2914 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2915 #endif
   2916 
   2917 	if (sc->sc_type == WM_T_PCH) {
   2918 		uint16_t val;
   2919 
   2920 		/* Save the NVM K1 bit setting */
   2921 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2922 
   2923 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2924 			sc->sc_nvm_k1_enabled = 1;
   2925 		else
   2926 			sc->sc_nvm_k1_enabled = 0;
   2927 	}
   2928 
   2929 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2930 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2931 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2932 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2933 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2934 	    || sc->sc_type == WM_T_82573
   2935 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2936 		/* Copper only */
   2937 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2941 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2942 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2943 		switch (link_mode) {
   2944 		case CTRL_EXT_LINK_MODE_1000KX:
   2945 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2946 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2947 			break;
   2948 		case CTRL_EXT_LINK_MODE_SGMII:
   2949 			if (wm_sgmii_uses_mdio(sc)) {
   2950 				aprint_normal_dev(sc->sc_dev,
   2951 				    "SGMII(MDIO)\n");
   2952 				sc->sc_flags |= WM_F_SGMII;
   2953 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2954 				break;
   2955 			}
   2956 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2957 			/*FALLTHROUGH*/
   2958 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2959 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2960 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2961 				if (link_mode
   2962 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2963 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2964 					sc->sc_flags |= WM_F_SGMII;
   2965 					aprint_verbose_dev(sc->sc_dev,
   2966 					    "SGMII\n");
   2967 				} else {
   2968 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2969 					aprint_verbose_dev(sc->sc_dev,
   2970 					    "SERDES\n");
   2971 				}
   2972 				break;
   2973 			}
   2974 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2975 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2976 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2977 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2978 				sc->sc_flags |= WM_F_SGMII;
   2979 			}
   2980 			/* Do not change link mode for 100BaseFX */
   2981 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2982 				break;
   2983 
   2984 			/* Change current link mode setting */
   2985 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2986 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2987 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2988 			else
   2989 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2990 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2991 			break;
   2992 		case CTRL_EXT_LINK_MODE_GMII:
   2993 		default:
   2994 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2995 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2996 			break;
   2997 		}
   2998 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   3004 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3005 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3006 			if (!wm_sgmii_uses_mdio(sc))
   3007 				wm_gmii_setup_phytype(sc, 0, 0);
   3008 			wm_reset_mdicnfg_82580(sc);
   3009 		}
   3010 	} else if (sc->sc_type < WM_T_82543 ||
   3011 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3012 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3013 			aprint_error_dev(sc->sc_dev,
   3014 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3015 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3016 		}
   3017 	} else {
   3018 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3019 			aprint_error_dev(sc->sc_dev,
   3020 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3021 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3022 		}
   3023 	}
   3024 
   3025 	if (sc->sc_type >= WM_T_PCH2)
   3026 		sc->sc_flags |= WM_F_EEE;
   3027 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3028 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3029 		/* XXX: Need special handling for I354. (not yet) */
   3030 		if (sc->sc_type != WM_T_I354)
   3031 			sc->sc_flags |= WM_F_EEE;
   3032 	}
   3033 
   3034 	/*
   3035 	 * The I350 has a bug where it always strips the CRC whether
   3036 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3037 	 */
   3038 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3039 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3040 		sc->sc_flags |= WM_F_CRC_STRIP;
   3041 
   3042 	/* Set device properties (macflags) */
   3043 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3044 
   3045 	if (sc->sc_flags != 0) {
   3046 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3047 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3048 	}
   3049 
   3050 #ifdef WM_MPSAFE
   3051 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3052 #else
   3053 	sc->sc_core_lock = NULL;
   3054 #endif
   3055 
   3056 	/* Initialize the media structures accordingly. */
   3057 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3058 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3059 	else
   3060 		wm_tbi_mediainit(sc); /* All others */
   3061 
   3062 	ifp = &sc->sc_ethercom.ec_if;
   3063 	xname = device_xname(sc->sc_dev);
   3064 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3065 	ifp->if_softc = sc;
   3066 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3067 #ifdef WM_MPSAFE
   3068 	ifp->if_extflags = IFEF_MPSAFE;
   3069 #endif
   3070 	ifp->if_ioctl = wm_ioctl;
   3071 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3072 		ifp->if_start = wm_nq_start;
   3073 		/*
   3074 		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
   3078 		 * In this situation, wm_nq_transmit() is disadvantageous
   3079 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3080 		 */
   3081 		if (wm_is_using_multiqueue(sc))
   3082 			ifp->if_transmit = wm_nq_transmit;
   3083 	} else {
   3084 		ifp->if_start = wm_start;
   3085 		/*
   3086 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3087 		 * described above.
   3088 		 */
   3089 		if (wm_is_using_multiqueue(sc))
   3090 			ifp->if_transmit = wm_transmit;
   3091 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick() is the watchdog. */
   3093 	ifp->if_init = wm_init;
   3094 	ifp->if_stop = wm_stop;
   3095 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3096 	IFQ_SET_READY(&ifp->if_snd);
   3097 
   3098 	/* Check for jumbo frame */
   3099 	switch (sc->sc_type) {
   3100 	case WM_T_82573:
   3101 		/* XXX limited to 9234 if ASPM is disabled */
   3102 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3103 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3104 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3105 		break;
   3106 	case WM_T_82571:
   3107 	case WM_T_82572:
   3108 	case WM_T_82574:
   3109 	case WM_T_82583:
   3110 	case WM_T_82575:
   3111 	case WM_T_82576:
   3112 	case WM_T_82580:
   3113 	case WM_T_I350:
   3114 	case WM_T_I354:
   3115 	case WM_T_I210:
   3116 	case WM_T_I211:
   3117 	case WM_T_80003:
   3118 	case WM_T_ICH9:
   3119 	case WM_T_ICH10:
   3120 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3121 	case WM_T_PCH_LPT:
   3122 	case WM_T_PCH_SPT:
   3123 	case WM_T_PCH_CNP:
   3124 		/* XXX limited to 9234 */
   3125 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3126 		break;
   3127 	case WM_T_PCH:
   3128 		/* XXX limited to 4096 */
   3129 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3130 		break;
   3131 	case WM_T_82542_2_0:
   3132 	case WM_T_82542_2_1:
   3133 	case WM_T_ICH8:
   3134 		/* No support for jumbo frame */
   3135 		break;
   3136 	default:
   3137 		/* ETHER_MAX_LEN_JUMBO */
   3138 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3139 		break;
   3140 	}
   3141 
	/* If we're an i82543 or greater, we can support VLANs. */
   3143 	if (sc->sc_type >= WM_T_82543) {
   3144 		sc->sc_ethercom.ec_capabilities |=
   3145 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3146 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3147 	}
   3148 
   3149 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3150 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3151 
   3152 	/*
	 * We can perform TCPv4 and UDPv4 checksum offload in both
	 * directions, but only on the i82543 and later.
   3155 	 */
   3156 	if (sc->sc_type >= WM_T_82543) {
   3157 		ifp->if_capabilities |=
   3158 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3159 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3160 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3161 		    IFCAP_CSUM_TCPv6_Tx |
   3162 		    IFCAP_CSUM_UDPv6_Tx;
   3163 	}
   3164 
   3165 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3167 	 *
   3168 	 *	82541GI (8086:1076) ... no
   3169 	 *	82572EI (8086:10b9) ... yes
   3170 	 */
   3171 	if (sc->sc_type >= WM_T_82571) {
   3172 		ifp->if_capabilities |=
   3173 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3174 	}
   3175 
   3176 	/*
	 * If we're an i82544 or greater (except i82547), we can do
   3178 	 * TCP segmentation offload.
   3179 	 */
   3180 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3181 		ifp->if_capabilities |= IFCAP_TSOv4;
   3182 
   3183 	if (sc->sc_type >= WM_T_82571)
   3184 		ifp->if_capabilities |= IFCAP_TSOv6;
   3185 
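	/*
	 * Default per-poll processing limits; these are assumed to be
	 * tunable at runtime via the sysctls set up in wm_init_sysctls().
	 */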
   3186 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3187 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3188 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3189 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3190 
   3191 	/* Attach the interface. */
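	/*
	 * NetBSD attaches in two stages: if_initialize() sets up the
	 * ifnet, the per-CPU input queue and Ethernet glue are hooked up
	 * in between, and if_register() publishes the interface.
	 */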
   3192 	if_initialize(ifp);
   3193 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3194 	ether_ifattach(ifp, enaddr);
   3195 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3196 	if_register(ifp);
   3197 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3198 	    RND_FLAG_DEFAULT);
   3199 
   3200 #ifdef WM_EVENT_COUNTERS
   3201 	/* Attach event counters. */
   3202 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3203 	    NULL, xname, "linkintr");
   3204 
   3205 	if (sc->sc_type >= WM_T_82542_2_1) {
   3206 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3207 		    NULL, xname, "tx_xoff");
   3208 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3209 		    NULL, xname, "tx_xon");
   3210 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3211 		    NULL, xname, "rx_xoff");
   3212 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3213 		    NULL, xname, "rx_xon");
   3214 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3215 		    NULL, xname, "rx_macctl");
   3216 	}
   3217 
   3218 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3219 	    NULL, xname, "CRC Error");
   3220 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3221 	    NULL, xname, "Symbol Error");
   3222 
   3223 	if (sc->sc_type >= WM_T_82543) {
   3224 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3225 		    NULL, xname, "Alignment Error");
   3226 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3227 		    NULL, xname, "Receive Error");
   3228 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3229 		    NULL, xname, "Carrier Extension Error");
   3230 	}
   3231 
   3232 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3233 	    NULL, xname, "Missed Packets");
   3234 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3235 	    NULL, xname, "Collision");
   3236 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3237 	    NULL, xname, "Sequence Error");
   3238 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3239 	    NULL, xname, "Receive Length Error");
   3240 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3241 	    NULL, xname, "Single Collision");
   3242 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3243 	    NULL, xname, "Excessive Collisions");
   3244 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Multiple Collision");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Late Collisions");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Defer");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Good Packets Rx");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Broadcast Packets Rx");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Multicast Packets Rx");
   3256 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3257 	    NULL, xname, "Good Packets Tx");
   3258 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3259 	    NULL, xname, "Good Octets Rx");
   3260 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3261 	    NULL, xname, "Good Octets Tx");
   3262 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "Rx No Buffers");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Rx Undersize");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Rx Fragment");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Rx Oversize");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Rx Jabber");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Total Octets Rx");
   3274 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3275 	    NULL, xname, "Total Octets Tx");
   3276 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3277 	    NULL, xname, "Total Packets Rx");
   3278 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3279 	    NULL, xname, "Total Packets Tx");
   3280 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "Multicast Packets Tx");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Broadcast Packets Tx Count");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Packets Rx (64 bytes)");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3287 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3288 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3289 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3290 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
	    NULL, xname, "Packets Rx (256-511 bytes)");
   3292 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3293 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3294 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3295 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Packets Tx (64 bytes)");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Interrupt Assertion");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3311 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3312 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3313 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3314 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3315 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3316 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3317 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3318 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3319 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3320 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3321 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3322 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3323 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3324 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3325 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3326 	if (sc->sc_type >= WM_T_82543) {
   3327 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3328 		    NULL, xname, "Tx with No CRS");
   3329 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3330 		    NULL, xname, "TCP Segmentation Context Tx");
   3331 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3332 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3333 	}
   3334 	if (sc->sc_type >= WM_T_82540) {
   3335 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3336 		    NULL, xname, "Management Packets RX");
   3337 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3338 		    NULL, xname, "Management Packets Dropped");
   3339 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3340 		    NULL, xname, "Management Packets TX");
   3341 	}
   3342 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3343 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3344 		    NULL, xname, "BMC2OS Packets received by host");
   3345 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3346 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3347 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3348 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3349 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3350 		    NULL, xname, "OS2BMC Packets received by BMC");
   3351 	}
   3352 #endif /* WM_EVENT_COUNTERS */
   3353 
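	/*
	 * Default to softint-based Tx/Rx processing; this is assumed to
	 * be switchable to workqueues via a sysctl from wm_init_sysctls().
	 */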
   3354 	sc->sc_txrx_use_workqueue = false;
   3355 
   3356 	if (wm_phy_need_linkdown_discard(sc)) {
   3357 		DPRINTF(sc, WM_DEBUG_LINK,
   3358 		    ("%s: %s: Set linkdown discard flag\n",
   3359 			device_xname(sc->sc_dev), __func__));
   3360 		wm_set_linkdown_discard(sc);
   3361 	}
   3362 
   3363 	wm_init_sysctls(sc);
   3364 
   3365 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3366 		pmf_class_network_register(self, ifp);
   3367 	else
   3368 		aprint_error_dev(self, "couldn't establish power handler\n");
   3369 
   3370 	sc->sc_flags |= WM_F_ATTACHED;
   3371 out:
   3372 	return;
   3373 }
   3374 
   3375 /* The detach function (ca_detach) */
   3376 static int
   3377 wm_detach(device_t self, int flags __unused)
   3378 {
   3379 	struct wm_softc *sc = device_private(self);
   3380 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3381 	int i;
   3382 
   3383 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3384 		return 0;
   3385 
	/* Stop the interface. Callouts are stopped in wm_stop(). */
   3387 	wm_stop(ifp, 1);
   3388 
   3389 	pmf_device_deregister(self);
   3390 
   3391 	sysctl_teardown(&sc->sc_sysctllog);
   3392 
   3393 #ifdef WM_EVENT_COUNTERS
   3394 	evcnt_detach(&sc->sc_ev_linkintr);
   3395 
   3396 	if (sc->sc_type >= WM_T_82542_2_1) {
   3397 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3398 		evcnt_detach(&sc->sc_ev_tx_xon);
   3399 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3400 		evcnt_detach(&sc->sc_ev_rx_xon);
   3401 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3402 	}
   3403 
   3404 	evcnt_detach(&sc->sc_ev_crcerrs);
   3405 	evcnt_detach(&sc->sc_ev_symerrc);
   3406 
   3407 	if (sc->sc_type >= WM_T_82543) {
   3408 		evcnt_detach(&sc->sc_ev_algnerrc);
   3409 		evcnt_detach(&sc->sc_ev_rxerrc);
   3410 		evcnt_detach(&sc->sc_ev_cexterr);
   3411 	}
   3412 	evcnt_detach(&sc->sc_ev_mpc);
   3413 	evcnt_detach(&sc->sc_ev_colc);
   3414 	evcnt_detach(&sc->sc_ev_sec);
   3415 	evcnt_detach(&sc->sc_ev_rlec);
   3416 	evcnt_detach(&sc->sc_ev_scc);
   3417 	evcnt_detach(&sc->sc_ev_ecol);
   3418 	evcnt_detach(&sc->sc_ev_mcc);
   3419 	evcnt_detach(&sc->sc_ev_latecol);
   3420 	evcnt_detach(&sc->sc_ev_dc);
   3421 	evcnt_detach(&sc->sc_ev_gprc);
   3422 	evcnt_detach(&sc->sc_ev_bprc);
   3423 	evcnt_detach(&sc->sc_ev_mprc);
   3424 	evcnt_detach(&sc->sc_ev_gptc);
   3425 	evcnt_detach(&sc->sc_ev_gorc);
   3426 	evcnt_detach(&sc->sc_ev_gotc);
   3427 	evcnt_detach(&sc->sc_ev_rnbc);
   3428 	evcnt_detach(&sc->sc_ev_ruc);
   3429 	evcnt_detach(&sc->sc_ev_rfc);
   3430 	evcnt_detach(&sc->sc_ev_roc);
   3431 	evcnt_detach(&sc->sc_ev_rjc);
   3432 	evcnt_detach(&sc->sc_ev_tor);
   3433 	evcnt_detach(&sc->sc_ev_tot);
   3434 	evcnt_detach(&sc->sc_ev_tpr);
   3435 	evcnt_detach(&sc->sc_ev_tpt);
   3436 	evcnt_detach(&sc->sc_ev_mptc);
   3437 	evcnt_detach(&sc->sc_ev_bptc);
   3438 	evcnt_detach(&sc->sc_ev_prc64);
   3439 	evcnt_detach(&sc->sc_ev_prc127);
   3440 	evcnt_detach(&sc->sc_ev_prc255);
   3441 	evcnt_detach(&sc->sc_ev_prc511);
   3442 	evcnt_detach(&sc->sc_ev_prc1023);
   3443 	evcnt_detach(&sc->sc_ev_prc1522);
   3444 	evcnt_detach(&sc->sc_ev_ptc64);
   3445 	evcnt_detach(&sc->sc_ev_ptc127);
   3446 	evcnt_detach(&sc->sc_ev_ptc255);
   3447 	evcnt_detach(&sc->sc_ev_ptc511);
   3448 	evcnt_detach(&sc->sc_ev_ptc1023);
   3449 	evcnt_detach(&sc->sc_ev_ptc1522);
   3450 	evcnt_detach(&sc->sc_ev_iac);
   3451 	evcnt_detach(&sc->sc_ev_icrxptc);
   3452 	evcnt_detach(&sc->sc_ev_icrxatc);
   3453 	evcnt_detach(&sc->sc_ev_ictxptc);
   3454 	evcnt_detach(&sc->sc_ev_ictxact);
   3455 	evcnt_detach(&sc->sc_ev_ictxqec);
   3456 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3457 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3458 	evcnt_detach(&sc->sc_ev_icrxoc);
   3459 	if (sc->sc_type >= WM_T_82543) {
   3460 		evcnt_detach(&sc->sc_ev_tncrs);
   3461 		evcnt_detach(&sc->sc_ev_tsctc);
   3462 		evcnt_detach(&sc->sc_ev_tsctfc);
   3463 	}
   3464 	if (sc->sc_type >= WM_T_82540) {
   3465 		evcnt_detach(&sc->sc_ev_mgtprc);
   3466 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3467 		evcnt_detach(&sc->sc_ev_mgtptc);
   3468 	}
   3469 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3470 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3471 		evcnt_detach(&sc->sc_ev_o2bspc);
   3472 		evcnt_detach(&sc->sc_ev_b2ospc);
   3473 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3474 	}
   3475 #endif /* WM_EVENT_COUNTERS */
   3476 
   3477 	rnd_detach_source(&sc->rnd_source);
   3478 
   3479 	/* Tell the firmware about the release */
   3480 	WM_CORE_LOCK(sc);
   3481 	wm_release_manageability(sc);
   3482 	wm_release_hw_control(sc);
   3483 	wm_enable_wakeup(sc);
   3484 	WM_CORE_UNLOCK(sc);
   3485 
   3486 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3487 
   3488 	ether_ifdetach(ifp);
   3489 	if_detach(ifp);
   3490 	if_percpuq_destroy(sc->sc_ipq);
   3491 
   3492 	/* Delete all remaining media. */
   3493 	ifmedia_fini(&sc->sc_mii.mii_media);
   3494 
   3495 	/* Unload RX dmamaps and free mbufs */
   3496 	for (i = 0; i < sc->sc_nqueues; i++) {
   3497 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3498 		mutex_enter(rxq->rxq_lock);
   3499 		wm_rxdrain(rxq);
   3500 		mutex_exit(rxq->rxq_lock);
   3501 	}
   3502 	/* Must unlock here */
   3503 
   3504 	/* Disestablish the interrupt handler */
   3505 	for (i = 0; i < sc->sc_nintrs; i++) {
   3506 		if (sc->sc_ihs[i] != NULL) {
   3507 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3508 			sc->sc_ihs[i] = NULL;
   3509 		}
   3510 	}
   3511 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3512 
	/* wm_stop() ensures the workqueue is stopped. */
   3514 	workqueue_destroy(sc->sc_queue_wq);
   3515 
   3516 	for (i = 0; i < sc->sc_nqueues; i++)
   3517 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3518 
   3519 	wm_free_txrx_queues(sc);
   3520 
   3521 	/* Unmap the registers */
   3522 	if (sc->sc_ss) {
   3523 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3524 		sc->sc_ss = 0;
   3525 	}
   3526 	if (sc->sc_ios) {
   3527 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3528 		sc->sc_ios = 0;
   3529 	}
   3530 	if (sc->sc_flashs) {
   3531 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3532 		sc->sc_flashs = 0;
   3533 	}
   3534 
   3535 	if (sc->sc_core_lock)
   3536 		mutex_obj_free(sc->sc_core_lock);
   3537 	if (sc->sc_ich_phymtx)
   3538 		mutex_obj_free(sc->sc_ich_phymtx);
   3539 	if (sc->sc_ich_nvmmtx)
   3540 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3541 
   3542 	return 0;
   3543 }
   3544 
   3545 static bool
   3546 wm_suspend(device_t self, const pmf_qual_t *qual)
   3547 {
   3548 	struct wm_softc *sc = device_private(self);
   3549 
   3550 	wm_release_manageability(sc);
   3551 	wm_release_hw_control(sc);
   3552 	wm_enable_wakeup(sc);
   3553 
   3554 	return true;
   3555 }
   3556 
   3557 static bool
   3558 wm_resume(device_t self, const pmf_qual_t *qual)
   3559 {
   3560 	struct wm_softc *sc = device_private(self);
   3561 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3562 	pcireg_t reg;
   3563 	char buf[256];
   3564 
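	/* Report and clear any wake-up status bits latched during suspend. */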
   3565 	reg = CSR_READ(sc, WMREG_WUS);
   3566 	if (reg != 0) {
   3567 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3568 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3569 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3570 	}
   3571 
   3572 	if (sc->sc_type >= WM_T_PCH2)
   3573 		wm_resume_workarounds_pchlan(sc);
   3574 	if ((ifp->if_flags & IFF_UP) == 0) {
   3575 		/* >= PCH_SPT hardware workaround before reset. */
   3576 		if (sc->sc_type >= WM_T_PCH_SPT)
   3577 			wm_flush_desc_rings(sc);
   3578 
   3579 		wm_reset(sc);
   3580 		/* Non-AMT based hardware can now take control from firmware */
   3581 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3582 			wm_get_hw_control(sc);
   3583 		wm_init_manageability(sc);
   3584 	} else {
   3585 		/*
   3586 		 * We called pmf_class_network_register(), so if_init() is
   3587 		 * automatically called when IFF_UP. wm_reset(),
   3588 		 * wm_get_hw_control() and wm_init_manageability() are called
   3589 		 * via wm_init().
   3590 		 */
   3591 	}
   3592 
   3593 	return true;
   3594 }
   3595 
   3596 /*
   3597  * wm_watchdog:		[ifnet interface function]
   3598  *
   3599  *	Watchdog timer handler.
   3600  */
   3601 static void
   3602 wm_watchdog(struct ifnet *ifp)
   3603 {
   3604 	int qid;
   3605 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576). */
   3607 
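	/* Collect a bitmap of hung Tx queues, one bit per queue ID. */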
   3608 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3609 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3610 
   3611 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3612 	}
   3613 
	/* If any queue has hung up, reset the interface. */
   3615 	if (hang_queue != 0) {
   3616 		(void)wm_init(ifp);
   3617 
   3618 		/*
		 * There is still some upper-layer processing that calls
		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3621 		 */
   3622 		/* Try to get more packets going. */
   3623 		ifp->if_start(ifp);
   3624 	}
   3625 }
   3626 
   3627 
   3628 static void
   3629 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3630 {
   3631 
   3632 	mutex_enter(txq->txq_lock);
   3633 	if (txq->txq_sending &&
   3634 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3635 		wm_watchdog_txq_locked(ifp, txq, hang);
   3636 
   3637 	mutex_exit(txq->txq_lock);
   3638 }
   3639 
   3640 static void
   3641 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3642     uint16_t *hang)
   3643 {
   3644 	struct wm_softc *sc = ifp->if_softc;
   3645 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3646 
   3647 	KASSERT(mutex_owned(txq->txq_lock));
   3648 
   3649 	/*
   3650 	 * Since we're using delayed interrupts, sweep up
   3651 	 * before we report an error.
   3652 	 */
   3653 	wm_txeof(txq, UINT_MAX);
   3654 
   3655 	if (txq->txq_sending)
   3656 		*hang |= __BIT(wmq->wmq_id);
   3657 
   3658 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3659 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3660 		    device_xname(sc->sc_dev));
   3661 	} else {
   3662 #ifdef WM_DEBUG
   3663 		int i, j;
   3664 		struct wm_txsoft *txs;
   3665 #endif
   3666 		log(LOG_ERR,
   3667 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3668 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3669 		    txq->txq_next);
   3670 		if_statinc(ifp, if_oerrors);
   3671 #ifdef WM_DEBUG
   3672 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3673 		    i = WM_NEXTTXS(txq, i)) {
   3674 			txs = &txq->txq_soft[i];
   3675 			printf("txs %d tx %d -> %d\n",
   3676 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3677 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3678 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3679 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3680 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3681 					printf("\t %#08x%08x\n",
   3682 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3683 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3684 				} else {
   3685 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3686 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3687 					    txq->txq_descs[j].wtx_addr.wa_low);
   3688 					printf("\t %#04x%02x%02x%08x\n",
   3689 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3690 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3691 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3692 					    txq->txq_descs[j].wtx_cmdlen);
   3693 				}
   3694 				if (j == txs->txs_lastdesc)
   3695 					break;
   3696 			}
   3697 		}
   3698 #endif
   3699 	}
   3700 }
   3701 
   3702 /*
   3703  * wm_tick:
   3704  *
   3705  *	One second timer, used to check link status, sweep up
   3706  *	completed transmit jobs, etc.
   3707  */
   3708 static void
   3709 wm_tick(void *arg)
   3710 {
   3711 	struct wm_softc *sc = arg;
   3712 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3714 	    cexterr;
   3715 #ifndef WM_MPSAFE
   3716 	int s = splnet();
   3717 #endif
   3718 
   3719 	WM_CORE_LOCK(sc);
   3720 
   3721 	if (sc->sc_core_stopping) {
   3722 		WM_CORE_UNLOCK(sc);
   3723 #ifndef WM_MPSAFE
   3724 		splx(s);
   3725 #endif
   3726 		return;
   3727 	}
   3728 
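	/*
	 * The statistics registers are clear-on-read, so each read below
	 * yields the delta since the last tick and can be accumulated
	 * directly into the event counters.
	 */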
   3729 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3730 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3731 	mpc = CSR_READ(sc, WMREG_MPC);
   3732 	colc = CSR_READ(sc, WMREG_COLC);
   3733 	sec = CSR_READ(sc, WMREG_SEC);
   3734 	rlec = CSR_READ(sc, WMREG_RLEC);
   3735 
   3736 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3737 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3738 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3739 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3740 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3741 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3742 
   3743 	if (sc->sc_type >= WM_T_82542_2_1) {
   3744 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3745 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3746 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3747 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3748 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3749 	}
   3750 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3751 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3752 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3753 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3754 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3755 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3756 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3757 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3758 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3759 
	/* GORC/GOTC are 64-bit counters; the high half is bits 63:32. */
	WM_EVCNT_ADD(&sc->sc_ev_gorc, CSR_READ(sc, WMREG_GORCL) +
	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
	WM_EVCNT_ADD(&sc->sc_ev_gotc, CSR_READ(sc, WMREG_GOTCL) +
	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3764 
   3765 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3766 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3767 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3768 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3769 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3770 
	/* TOR/TOT are likewise 64-bit low/high register pairs. */
	WM_EVCNT_ADD(&sc->sc_ev_tor, CSR_READ(sc, WMREG_TORL) +
	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
	WM_EVCNT_ADD(&sc->sc_ev_tot, CSR_READ(sc, WMREG_TOTL) +
	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3775 
   3776 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3777 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3778 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3779 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3780 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3781 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3782 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3783 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3784 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3785 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3786 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3787 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3788 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3789 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3790 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3791 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3792 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3793 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3794 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3795 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3797 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3798 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3799 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3800 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3801 
   3802 	if (sc->sc_type >= WM_T_82543) {
   3803 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3804 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3805 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3806 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3807 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3808 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3809 
   3810 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3811 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3812 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3813 	} else
   3814 		algnerrc = rxerrc = cexterr = 0;
   3815 
   3816 	if (sc->sc_type >= WM_T_82540) {
   3817 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3818 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3819 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3820 	}
   3821 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3822 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3823 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3824 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3825 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3826 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3827 	}
   3828 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3829 	if_statadd_ref(nsr, if_collisions, colc);
   3830 	if_statadd_ref(nsr, if_ierrors,
   3831 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3832 	/*
	 * WMREG_RNBC is incremented when there is no available buffer in
	 * host memory. It does not count dropped packets, because the
	 * controller can still receive packets in that case as long as
	 * there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC itself, use a dedicated EVCNT
	 * instead of if_iqdrops.
   3840 	 */
   3841 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3842 	IF_STAT_PUTREF(ifp);
   3843 
   3844 	if (sc->sc_flags & WM_F_HAS_MII)
   3845 		mii_tick(&sc->sc_mii);
   3846 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3847 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3848 		wm_serdes_tick(sc);
   3849 	else
   3850 		wm_tbi_tick(sc);
   3851 
   3852 	WM_CORE_UNLOCK(sc);
   3853 #ifndef WM_MPSAFE
   3854 	splx(s);
   3855 #endif
   3856 
   3857 	wm_watchdog(ifp);
   3858 
   3859 	callout_schedule(&sc->sc_tick_ch, hz);
   3860 }
   3861 
   3862 static int
   3863 wm_ifflags_cb(struct ethercom *ec)
   3864 {
   3865 	struct ifnet *ifp = &ec->ec_if;
   3866 	struct wm_softc *sc = ifp->if_softc;
   3867 	u_short iffchange;
   3868 	int ecchange;
   3869 	bool needreset = false;
   3870 	int rc = 0;
   3871 
   3872 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3873 		device_xname(sc->sc_dev), __func__));
   3874 
   3875 	WM_CORE_LOCK(sc);
   3876 
   3877 	/*
   3878 	 * Check for if_flags.
	 * The main purpose is to prevent link-down when opening bpf.
   3880 	 */
   3881 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3882 	sc->sc_if_flags = ifp->if_flags;
   3883 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3884 		needreset = true;
   3885 		goto ec;
   3886 	}
   3887 
   3888 	/* iff related updates */
   3889 	if ((iffchange & IFF_PROMISC) != 0)
   3890 		wm_set_filter(sc);
   3891 
   3892 	wm_set_vlan(sc);
   3893 
   3894 ec:
   3895 	/* Check for ec_capenable. */
   3896 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3897 	sc->sc_ec_capenable = ec->ec_capenable;
   3898 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3899 		needreset = true;
   3900 		goto out;
   3901 	}
   3902 
   3903 	/* ec related updates */
   3904 	wm_set_eee(sc);
   3905 
   3906 out:
   3907 	if (needreset)
   3908 		rc = ENETRESET;
   3909 	WM_CORE_UNLOCK(sc);
   3910 
   3911 	return rc;
   3912 }
   3913 
   3914 static bool
   3915 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3916 {
   3917 
   3918 	switch (sc->sc_phytype) {
   3919 	case WMPHY_82577: /* ihphy */
   3920 	case WMPHY_82578: /* atphy */
   3921 	case WMPHY_82579: /* ihphy */
   3922 	case WMPHY_I217: /* ihphy */
   3923 	case WMPHY_82580: /* ihphy */
   3924 	case WMPHY_I350: /* ihphy */
   3925 		return true;
   3926 	default:
   3927 		return false;
   3928 	}
   3929 }
   3930 
   3931 static void
   3932 wm_set_linkdown_discard(struct wm_softc *sc)
   3933 {
   3934 
   3935 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3936 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3937 
   3938 		mutex_enter(txq->txq_lock);
   3939 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3940 		mutex_exit(txq->txq_lock);
   3941 	}
   3942 }
   3943 
   3944 static void
   3945 wm_clear_linkdown_discard(struct wm_softc *sc)
   3946 {
   3947 
   3948 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3949 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3950 
   3951 		mutex_enter(txq->txq_lock);
   3952 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3953 		mutex_exit(txq->txq_lock);
   3954 	}
   3955 }
   3956 
   3957 /*
   3958  * wm_ioctl:		[ifnet interface function]
   3959  *
   3960  *	Handle control requests from the operator.
   3961  */
   3962 static int
   3963 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3964 {
   3965 	struct wm_softc *sc = ifp->if_softc;
   3966 	struct ifreq *ifr = (struct ifreq *)data;
   3967 	struct ifaddr *ifa = (struct ifaddr *)data;
   3968 	struct sockaddr_dl *sdl;
   3969 	int s, error;
   3970 
   3971 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3972 		device_xname(sc->sc_dev), __func__));
   3973 
   3974 #ifndef WM_MPSAFE
   3975 	s = splnet();
   3976 #endif
   3977 	switch (cmd) {
   3978 	case SIOCSIFMEDIA:
   3979 		WM_CORE_LOCK(sc);
   3980 		/* Flow control requires full-duplex mode. */
   3981 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3982 		    (ifr->ifr_media & IFM_FDX) == 0)
   3983 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3984 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3985 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3986 				/* We can do both TXPAUSE and RXPAUSE. */
   3987 				ifr->ifr_media |=
   3988 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3989 			}
   3990 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3991 		}
   3992 		WM_CORE_UNLOCK(sc);
   3993 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3994 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3995 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   3996 				DPRINTF(sc, WM_DEBUG_LINK,
   3997 				    ("%s: %s: Set linkdown discard flag\n",
   3998 					device_xname(sc->sc_dev), __func__));
   3999 				wm_set_linkdown_discard(sc);
   4000 			}
   4001 		}
   4002 		break;
   4003 	case SIOCINITIFADDR:
   4004 		WM_CORE_LOCK(sc);
   4005 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4006 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4007 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4008 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4009 			/* Unicast address is the first multicast entry */
   4010 			wm_set_filter(sc);
   4011 			error = 0;
   4012 			WM_CORE_UNLOCK(sc);
   4013 			break;
   4014 		}
   4015 		WM_CORE_UNLOCK(sc);
   4016 		/*FALLTHROUGH*/
   4017 	default:
   4018 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4019 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4020 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4021 				DPRINTF(sc, WM_DEBUG_LINK,
   4022 				    ("%s: %s: Set linkdown discard flag\n",
   4023 					device_xname(sc->sc_dev), __func__));
   4024 				wm_set_linkdown_discard(sc);
   4025 			}
   4026 		}
   4027 #ifdef WM_MPSAFE
   4028 		s = splnet();
   4029 #endif
   4030 		/* It may call wm_start, so unlock here */
   4031 		error = ether_ioctl(ifp, cmd, data);
   4032 #ifdef WM_MPSAFE
   4033 		splx(s);
   4034 #endif
   4035 		if (error != ENETRESET)
   4036 			break;
   4037 
   4038 		error = 0;
   4039 
   4040 		if (cmd == SIOCSIFCAP)
   4041 			error = if_init(ifp);
   4042 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   4043 			;
   4044 		else if (ifp->if_flags & IFF_RUNNING) {
   4045 			/*
   4046 			 * Multicast list has changed; set the hardware filter
   4047 			 * accordingly.
   4048 			 */
   4049 			WM_CORE_LOCK(sc);
   4050 			wm_set_filter(sc);
   4051 			WM_CORE_UNLOCK(sc);
   4052 		}
   4053 		break;
   4054 	}
   4055 
   4056 #ifndef WM_MPSAFE
   4057 	splx(s);
   4058 #endif
   4059 	return error;
   4060 }
   4061 
   4062 /* MAC address related */
   4063 
   4064 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, return offset 0.
   4067  */
   4068 static uint16_t
   4069 wm_check_alt_mac_addr(struct wm_softc *sc)
   4070 {
   4071 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4072 	uint16_t offset = NVM_OFF_MACADDR;
   4073 
   4074 	/* Try to read alternative MAC address pointer */
   4075 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4076 		return 0;
   4077 
	/* Check whether the pointer is valid. */
   4079 	if ((offset == 0x0000) || (offset == 0xffff))
   4080 		return 0;
   4081 
   4082 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4083 	/*
	 * Check whether the alternative MAC address is valid. Some cards
	 * have a non-0xffff pointer but don't actually use an alternative
	 * MAC address.
	 *
	 * The test is whether the multicast (group) bit is set; a valid
	 * unicast address must have it clear.
   4089 	 */
   4090 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4091 		if (((myea[0] & 0xff) & 0x01) == 0)
   4092 			return offset; /* Found */
   4093 
   4094 	/* Not found */
   4095 	return 0;
   4096 }
   4097 
   4098 static int
   4099 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4100 {
   4101 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4102 	uint16_t offset = NVM_OFF_MACADDR;
   4103 	int do_invert = 0;
   4104 
   4105 	switch (sc->sc_type) {
   4106 	case WM_T_82580:
   4107 	case WM_T_I350:
   4108 	case WM_T_I354:
   4109 		/* EEPROM Top Level Partitioning */
   4110 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4111 		break;
   4112 	case WM_T_82571:
   4113 	case WM_T_82575:
   4114 	case WM_T_82576:
   4115 	case WM_T_80003:
   4116 	case WM_T_I210:
   4117 	case WM_T_I211:
   4118 		offset = wm_check_alt_mac_addr(sc);
   4119 		if (offset == 0)
   4120 			if ((sc->sc_funcid & 0x01) == 1)
   4121 				do_invert = 1;
   4122 		break;
   4123 	default:
   4124 		if ((sc->sc_funcid & 0x01) == 1)
   4125 			do_invert = 1;
   4126 		break;
   4127 	}
   4128 
   4129 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4130 		goto bad;
   4131 
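	/*
	 * Each 16-bit NVM word holds two MAC bytes, low byte first; e.g.
	 * a MAC starting 00:1b:... is stored as myea[0] = 0x1b00.
	 */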
   4132 	enaddr[0] = myea[0] & 0xff;
   4133 	enaddr[1] = myea[0] >> 8;
   4134 	enaddr[2] = myea[1] & 0xff;
   4135 	enaddr[3] = myea[1] >> 8;
   4136 	enaddr[4] = myea[2] & 0xff;
   4137 	enaddr[5] = myea[2] >> 8;
   4138 
   4139 	/*
   4140 	 * Toggle the LSB of the MAC address on the second port
   4141 	 * of some dual port cards.
   4142 	 */
   4143 	if (do_invert != 0)
   4144 		enaddr[5] ^= 1;
   4145 
   4146 	return 0;
   4147 
   4148  bad:
   4149 	return -1;
   4150 }
   4151 
   4152 /*
   4153  * wm_set_ral:
   4154  *
 *	Set an entry in the receive address list.
   4156  */
   4157 static void
   4158 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4159 {
   4160 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4161 	uint32_t wlock_mac;
   4162 	int rv;
   4163 
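	/*
	 * A NULL enaddr clears the entry: RAL_AV (address valid) is left
	 * unset, so the hardware ignores this slot.
	 */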
   4164 	if (enaddr != NULL) {
   4165 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4166 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4167 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4168 		ral_hi |= RAL_AV;
   4169 	} else {
   4170 		ral_lo = 0;
   4171 		ral_hi = 0;
   4172 	}
   4173 
   4174 	switch (sc->sc_type) {
   4175 	case WM_T_82542_2_0:
   4176 	case WM_T_82542_2_1:
   4177 	case WM_T_82543:
   4178 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4179 		CSR_WRITE_FLUSH(sc);
   4180 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4181 		CSR_WRITE_FLUSH(sc);
   4182 		break;
   4183 	case WM_T_PCH2:
   4184 	case WM_T_PCH_LPT:
   4185 	case WM_T_PCH_SPT:
   4186 	case WM_T_PCH_CNP:
   4187 		if (idx == 0) {
   4188 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4189 			CSR_WRITE_FLUSH(sc);
   4190 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4191 			CSR_WRITE_FLUSH(sc);
   4192 			return;
   4193 		}
   4194 		if (sc->sc_type != WM_T_PCH2) {
   4195 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4196 			    FWSM_WLOCK_MAC);
   4197 			addrl = WMREG_SHRAL(idx - 1);
   4198 			addrh = WMREG_SHRAH(idx - 1);
   4199 		} else {
   4200 			wlock_mac = 0;
   4201 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4202 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4203 		}
   4204 
   4205 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4206 			rv = wm_get_swflag_ich8lan(sc);
   4207 			if (rv != 0)
   4208 				return;
   4209 			CSR_WRITE(sc, addrl, ral_lo);
   4210 			CSR_WRITE_FLUSH(sc);
   4211 			CSR_WRITE(sc, addrh, ral_hi);
   4212 			CSR_WRITE_FLUSH(sc);
   4213 			wm_put_swflag_ich8lan(sc);
   4214 		}
   4215 
   4216 		break;
   4217 	default:
   4218 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4219 		CSR_WRITE_FLUSH(sc);
   4220 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4221 		CSR_WRITE_FLUSH(sc);
   4222 		break;
   4223 	}
   4224 }
   4225 
   4226 /*
   4227  * wm_mchash:
   4228  *
   4229  *	Compute the hash of the multicast address for the 4096-bit
   4230  *	multicast filter.
   4231  */
   4232 static uint32_t
   4233 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4234 {
   4235 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4236 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4237 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4238 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4239 	uint32_t hash;
   4240 
   4241 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4242 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4243 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4244 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4245 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4246 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4247 		return (hash & 0x3ff);
   4248 	}
   4249 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4250 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4251 
   4252 	return (hash & 0xfff);
   4253 }
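
/*
 * Example (illustrative): for the all-hosts group 01:00:5e:00:00:01 with
 * sc_mchash_type 0 on a non-ICH/PCH chip, hash = (0x00 >> 4) |
 * ((uint16_t)0x01 << 4) = 0x10, so wm_set_filter() sets bit 16 of MTA[0]
 * (reg = hash >> 5 = 0, bit = hash & 0x1f = 16).
 */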
   4254 
   4255 /*
    4256  * wm_rar_count:
    4257  *	Return the number of entries in the receive address list.
   4258  */
   4259 static int
   4260 wm_rar_count(struct wm_softc *sc)
   4261 {
   4262 	int size;
   4263 
   4264 	switch (sc->sc_type) {
   4265 	case WM_T_ICH8:
    4266 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4267 		break;
   4268 	case WM_T_ICH9:
   4269 	case WM_T_ICH10:
   4270 	case WM_T_PCH:
   4271 		size = WM_RAL_TABSIZE_ICH8;
   4272 		break;
   4273 	case WM_T_PCH2:
   4274 		size = WM_RAL_TABSIZE_PCH2;
   4275 		break;
   4276 	case WM_T_PCH_LPT:
   4277 	case WM_T_PCH_SPT:
   4278 	case WM_T_PCH_CNP:
   4279 		size = WM_RAL_TABSIZE_PCH_LPT;
   4280 		break;
   4281 	case WM_T_82575:
   4282 	case WM_T_I210:
   4283 	case WM_T_I211:
   4284 		size = WM_RAL_TABSIZE_82575;
   4285 		break;
   4286 	case WM_T_82576:
   4287 	case WM_T_82580:
   4288 		size = WM_RAL_TABSIZE_82576;
   4289 		break;
   4290 	case WM_T_I350:
   4291 	case WM_T_I354:
   4292 		size = WM_RAL_TABSIZE_I350;
   4293 		break;
   4294 	default:
   4295 		size = WM_RAL_TABSIZE;
   4296 	}
   4297 
   4298 	return size;
   4299 }
   4300 
   4301 /*
   4302  * wm_set_filter:
   4303  *
   4304  *	Set up the receive filter.
   4305  */
   4306 static void
   4307 wm_set_filter(struct wm_softc *sc)
   4308 {
   4309 	struct ethercom *ec = &sc->sc_ethercom;
   4310 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4311 	struct ether_multi *enm;
   4312 	struct ether_multistep step;
   4313 	bus_addr_t mta_reg;
   4314 	uint32_t hash, reg, bit;
   4315 	int i, size, ralmax, rv;
   4316 
   4317 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4318 		device_xname(sc->sc_dev), __func__));
   4319 
   4320 	if (sc->sc_type >= WM_T_82544)
   4321 		mta_reg = WMREG_CORDOVA_MTA;
   4322 	else
   4323 		mta_reg = WMREG_MTA;
   4324 
   4325 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4326 
   4327 	if (ifp->if_flags & IFF_BROADCAST)
   4328 		sc->sc_rctl |= RCTL_BAM;
   4329 	if (ifp->if_flags & IFF_PROMISC) {
   4330 		sc->sc_rctl |= RCTL_UPE;
   4331 		ETHER_LOCK(ec);
   4332 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4333 		ETHER_UNLOCK(ec);
   4334 		goto allmulti;
   4335 	}
   4336 
   4337 	/*
   4338 	 * Set the station address in the first RAL slot, and
   4339 	 * clear the remaining slots.
   4340 	 */
   4341 	size = wm_rar_count(sc);
   4342 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4343 
   4344 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4345 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4346 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4347 		switch (i) {
   4348 		case 0:
   4349 			/* We can use all entries */
   4350 			ralmax = size;
   4351 			break;
   4352 		case 1:
   4353 			/* Only RAR[0] */
   4354 			ralmax = 1;
   4355 			break;
   4356 		default:
   4357 			/* Available SHRA + RAR[0] */
   4358 			ralmax = i + 1;
   4359 		}
   4360 	} else
   4361 		ralmax = size;
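	/*
	 * Illustrative example (FWSM value assumed): with size = 12 and
	 * FWSM_WLOCK_MAC = 3, RAR[0] plus three SHRA entries are writable,
	 * so ralmax = 4 and the loop below clears only the slots software
	 * is permitted to touch.
	 */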
   4362 	for (i = 1; i < size; i++) {
   4363 		if (i < ralmax)
   4364 			wm_set_ral(sc, NULL, i);
   4365 	}
   4366 
   4367 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4368 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4369 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4370 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4371 		size = WM_ICH8_MC_TABSIZE;
   4372 	else
   4373 		size = WM_MC_TABSIZE;
   4374 	/* Clear out the multicast table. */
   4375 	for (i = 0; i < size; i++) {
   4376 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4377 		CSR_WRITE_FLUSH(sc);
   4378 	}
   4379 
   4380 	ETHER_LOCK(ec);
   4381 	ETHER_FIRST_MULTI(step, ec, enm);
   4382 	while (enm != NULL) {
   4383 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4384 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4385 			ETHER_UNLOCK(ec);
   4386 			/*
   4387 			 * We must listen to a range of multicast addresses.
   4388 			 * For now, just accept all multicasts, rather than
   4389 			 * trying to set only those filter bits needed to match
   4390 			 * the range.  (At this time, the only use of address
   4391 			 * ranges is for IP multicast routing, for which the
   4392 			 * range is big enough to require all bits set.)
   4393 			 */
   4394 			goto allmulti;
   4395 		}
   4396 
   4397 		hash = wm_mchash(sc, enm->enm_addrlo);
   4398 
   4399 		reg = (hash >> 5);
   4400 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4401 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4402 		    || (sc->sc_type == WM_T_PCH2)
   4403 		    || (sc->sc_type == WM_T_PCH_LPT)
   4404 		    || (sc->sc_type == WM_T_PCH_SPT)
   4405 		    || (sc->sc_type == WM_T_PCH_CNP))
   4406 			reg &= 0x1f;
   4407 		else
   4408 			reg &= 0x7f;
   4409 		bit = hash & 0x1f;
   4410 
   4411 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4412 		hash |= 1U << bit;
   4413 
   4414 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4415 			/*
    4416 			 * 82544 Errata 9: Certain registers cannot be written
   4417 			 * with particular alignments in PCI-X bus operation
   4418 			 * (FCAH, MTA and VFTA).
   4419 			 */
   4420 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4421 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4422 			CSR_WRITE_FLUSH(sc);
   4423 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4424 			CSR_WRITE_FLUSH(sc);
   4425 		} else {
   4426 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4427 			CSR_WRITE_FLUSH(sc);
   4428 		}
   4429 
   4430 		ETHER_NEXT_MULTI(step, enm);
   4431 	}
   4432 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4433 	ETHER_UNLOCK(ec);
   4434 
   4435 	goto setit;
   4436 
   4437  allmulti:
   4438 	sc->sc_rctl |= RCTL_MPE;
   4439 
   4440  setit:
   4441 	if (sc->sc_type >= WM_T_PCH2) {
   4442 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4443 		    && (ifp->if_mtu > ETHERMTU))
   4444 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4445 		else
   4446 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4447 		if (rv != 0)
   4448 			device_printf(sc->sc_dev,
   4449 			    "Failed to do workaround for jumbo frame.\n");
   4450 	}
   4451 
   4452 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4453 }
   4454 
   4455 /* Reset and init related */
   4456 
   4457 static void
   4458 wm_set_vlan(struct wm_softc *sc)
   4459 {
   4460 
   4461 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4462 		device_xname(sc->sc_dev), __func__));
   4463 
   4464 	/* Deal with VLAN enables. */
   4465 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4466 		sc->sc_ctrl |= CTRL_VME;
   4467 	else
   4468 		sc->sc_ctrl &= ~CTRL_VME;
   4469 
   4470 	/* Write the control registers. */
   4471 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4472 }
   4473 
   4474 static void
   4475 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4476 {
   4477 	uint32_t gcr;
   4478 	pcireg_t ctrl2;
   4479 
   4480 	gcr = CSR_READ(sc, WMREG_GCR);
   4481 
    4482 	/* Only take action if the timeout value is at its default of 0 */
   4483 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4484 		goto out;
   4485 
   4486 	if ((gcr & GCR_CAP_VER2) == 0) {
   4487 		gcr |= GCR_CMPL_TMOUT_10MS;
   4488 		goto out;
   4489 	}
   4490 
   4491 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4492 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4493 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4494 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4495 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4496 
   4497 out:
   4498 	/* Disable completion timeout resend */
   4499 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4500 
   4501 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4502 }
   4503 
   4504 void
   4505 wm_get_auto_rd_done(struct wm_softc *sc)
   4506 {
   4507 	int i;
   4508 
    4509 	/* Wait for the EEPROM to reload */
   4510 	switch (sc->sc_type) {
   4511 	case WM_T_82571:
   4512 	case WM_T_82572:
   4513 	case WM_T_82573:
   4514 	case WM_T_82574:
   4515 	case WM_T_82583:
   4516 	case WM_T_82575:
   4517 	case WM_T_82576:
   4518 	case WM_T_82580:
   4519 	case WM_T_I350:
   4520 	case WM_T_I354:
   4521 	case WM_T_I210:
   4522 	case WM_T_I211:
   4523 	case WM_T_80003:
   4524 	case WM_T_ICH8:
   4525 	case WM_T_ICH9:
   4526 		for (i = 0; i < 10; i++) {
   4527 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4528 				break;
   4529 			delay(1000);
   4530 		}
   4531 		if (i == 10) {
   4532 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4533 			    "complete\n", device_xname(sc->sc_dev));
   4534 		}
   4535 		break;
   4536 	default:
   4537 		break;
   4538 	}
   4539 }
   4540 
   4541 void
   4542 wm_lan_init_done(struct wm_softc *sc)
   4543 {
   4544 	uint32_t reg = 0;
   4545 	int i;
   4546 
   4547 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4548 		device_xname(sc->sc_dev), __func__));
   4549 
   4550 	/* Wait for eeprom to reload */
   4551 	switch (sc->sc_type) {
   4552 	case WM_T_ICH10:
   4553 	case WM_T_PCH:
   4554 	case WM_T_PCH2:
   4555 	case WM_T_PCH_LPT:
   4556 	case WM_T_PCH_SPT:
   4557 	case WM_T_PCH_CNP:
   4558 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4559 			reg = CSR_READ(sc, WMREG_STATUS);
   4560 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4561 				break;
   4562 			delay(100);
   4563 		}
   4564 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4565 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4566 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4567 		}
   4568 		break;
   4569 	default:
   4570 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4571 		    __func__);
   4572 		break;
   4573 	}
   4574 
   4575 	reg &= ~STATUS_LAN_INIT_DONE;
   4576 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4577 }
   4578 
   4579 void
   4580 wm_get_cfg_done(struct wm_softc *sc)
   4581 {
   4582 	int mask;
   4583 	uint32_t reg;
   4584 	int i;
   4585 
   4586 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4587 		device_xname(sc->sc_dev), __func__));
   4588 
   4589 	/* Wait for eeprom to reload */
   4590 	switch (sc->sc_type) {
   4591 	case WM_T_82542_2_0:
   4592 	case WM_T_82542_2_1:
   4593 		/* null */
   4594 		break;
   4595 	case WM_T_82543:
   4596 	case WM_T_82544:
   4597 	case WM_T_82540:
   4598 	case WM_T_82545:
   4599 	case WM_T_82545_3:
   4600 	case WM_T_82546:
   4601 	case WM_T_82546_3:
   4602 	case WM_T_82541:
   4603 	case WM_T_82541_2:
   4604 	case WM_T_82547:
   4605 	case WM_T_82547_2:
   4606 	case WM_T_82573:
   4607 	case WM_T_82574:
   4608 	case WM_T_82583:
   4609 		/* generic */
   4610 		delay(10*1000);
   4611 		break;
   4612 	case WM_T_80003:
   4613 	case WM_T_82571:
   4614 	case WM_T_82572:
   4615 	case WM_T_82575:
   4616 	case WM_T_82576:
   4617 	case WM_T_82580:
   4618 	case WM_T_I350:
   4619 	case WM_T_I354:
   4620 	case WM_T_I210:
   4621 	case WM_T_I211:
   4622 		if (sc->sc_type == WM_T_82571) {
   4623 			/* Only 82571 shares port 0 */
   4624 			mask = EEMNGCTL_CFGDONE_0;
   4625 		} else
   4626 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4627 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4628 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4629 				break;
   4630 			delay(1000);
   4631 		}
   4632 		if (i >= WM_PHY_CFG_TIMEOUT)
   4633 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4634 				device_xname(sc->sc_dev), __func__));
   4635 		break;
   4636 	case WM_T_ICH8:
   4637 	case WM_T_ICH9:
   4638 	case WM_T_ICH10:
   4639 	case WM_T_PCH:
   4640 	case WM_T_PCH2:
   4641 	case WM_T_PCH_LPT:
   4642 	case WM_T_PCH_SPT:
   4643 	case WM_T_PCH_CNP:
   4644 		delay(10*1000);
   4645 		if (sc->sc_type >= WM_T_ICH10)
   4646 			wm_lan_init_done(sc);
   4647 		else
   4648 			wm_get_auto_rd_done(sc);
   4649 
   4650 		/* Clear PHY Reset Asserted bit */
   4651 		reg = CSR_READ(sc, WMREG_STATUS);
   4652 		if ((reg & STATUS_PHYRA) != 0)
   4653 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4654 		break;
   4655 	default:
   4656 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4657 		    __func__);
   4658 		break;
   4659 	}
   4660 }
   4661 
   4662 int
   4663 wm_phy_post_reset(struct wm_softc *sc)
   4664 {
   4665 	device_t dev = sc->sc_dev;
   4666 	uint16_t reg;
   4667 	int rv = 0;
   4668 
   4669 	/* This function is only for ICH8 and newer. */
   4670 	if (sc->sc_type < WM_T_ICH8)
   4671 		return 0;
   4672 
   4673 	if (wm_phy_resetisblocked(sc)) {
   4674 		/* XXX */
   4675 		device_printf(dev, "PHY is blocked\n");
   4676 		return -1;
   4677 	}
   4678 
    4679 	/* Allow time for the h/w to reach a quiescent state after reset */
   4680 	delay(10*1000);
   4681 
   4682 	/* Perform any necessary post-reset workarounds */
   4683 	if (sc->sc_type == WM_T_PCH)
   4684 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4685 	else if (sc->sc_type == WM_T_PCH2)
   4686 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4687 	if (rv != 0)
   4688 		return rv;
   4689 
   4690 	/* Clear the host wakeup bit after lcd reset */
   4691 	if (sc->sc_type >= WM_T_PCH) {
   4692 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4693 		reg &= ~BM_WUC_HOST_WU_BIT;
   4694 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4695 	}
   4696 
   4697 	/* Configure the LCD with the extended configuration region in NVM */
   4698 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4699 		return rv;
   4700 
   4701 	/* Configure the LCD with the OEM bits in NVM */
   4702 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4703 
   4704 	if (sc->sc_type == WM_T_PCH2) {
   4705 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4706 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4707 			delay(10 * 1000);
   4708 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4709 		}
   4710 		/* Set EEE LPI Update Timer to 200usec */
   4711 		rv = sc->phy.acquire(sc);
   4712 		if (rv)
   4713 			return rv;
   4714 		rv = wm_write_emi_reg_locked(dev,
   4715 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4716 		sc->phy.release(sc);
   4717 	}
   4718 
   4719 	return rv;
   4720 }
   4721 
   4722 /* Only for PCH and newer */
   4723 static int
   4724 wm_write_smbus_addr(struct wm_softc *sc)
   4725 {
   4726 	uint32_t strap, freq;
   4727 	uint16_t phy_data;
   4728 	int rv;
   4729 
   4730 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4731 		device_xname(sc->sc_dev), __func__));
   4732 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4733 
   4734 	strap = CSR_READ(sc, WMREG_STRAP);
   4735 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4736 
   4737 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4738 	if (rv != 0)
   4739 		return -1;
   4740 
   4741 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4742 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4743 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4744 
   4745 	if (sc->sc_phytype == WMPHY_I217) {
    4746 		/* Restore SMBus frequency (strap value is biased by one) */
    4747 		if (freq--) {
   4748 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4749 			    | HV_SMB_ADDR_FREQ_HIGH);
   4750 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4751 			    HV_SMB_ADDR_FREQ_LOW);
   4752 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4753 			    HV_SMB_ADDR_FREQ_HIGH);
   4754 		} else
   4755 			DPRINTF(sc, WM_DEBUG_INIT,
   4756 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4757 				device_xname(sc->sc_dev), __func__));
   4758 	}
   4759 
   4760 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4761 	    phy_data);
   4762 }
   4763 
   4764 static int
   4765 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4766 {
   4767 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4768 	uint16_t phy_page = 0;
   4769 	int rv = 0;
   4770 
   4771 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4772 		device_xname(sc->sc_dev), __func__));
   4773 
   4774 	switch (sc->sc_type) {
   4775 	case WM_T_ICH8:
   4776 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4777 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4778 			return 0;
   4779 
   4780 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4781 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4782 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4783 			break;
   4784 		}
   4785 		/* FALLTHROUGH */
   4786 	case WM_T_PCH:
   4787 	case WM_T_PCH2:
   4788 	case WM_T_PCH_LPT:
   4789 	case WM_T_PCH_SPT:
   4790 	case WM_T_PCH_CNP:
   4791 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4792 		break;
   4793 	default:
   4794 		return 0;
   4795 	}
   4796 
   4797 	if ((rv = sc->phy.acquire(sc)) != 0)
   4798 		return rv;
   4799 
   4800 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4801 	if ((reg & sw_cfg_mask) == 0)
   4802 		goto release;
   4803 
   4804 	/*
   4805 	 * Make sure HW does not configure LCD from PHY extended configuration
   4806 	 * before SW configuration
   4807 	 */
   4808 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4809 	if ((sc->sc_type < WM_T_PCH2)
   4810 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4811 		goto release;
   4812 
   4813 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4814 		device_xname(sc->sc_dev), __func__));
   4815 	/* word_addr is in DWORD */
   4816 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4817 
   4818 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4819 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4820 	if (cnf_size == 0)
   4821 		goto release;
   4822 
   4823 	if (((sc->sc_type == WM_T_PCH)
   4824 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4825 	    || (sc->sc_type > WM_T_PCH)) {
   4826 		/*
   4827 		 * HW configures the SMBus address and LEDs when the OEM and
   4828 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4829 		 * are cleared, SW will configure them instead.
   4830 		 */
   4831 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4832 			device_xname(sc->sc_dev), __func__));
   4833 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4834 			goto release;
   4835 
   4836 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4837 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4838 		    (uint16_t)reg);
   4839 		if (rv != 0)
   4840 			goto release;
   4841 	}
   4842 
   4843 	/* Configure LCD from extended configuration region. */
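	/*
	 * Each of the cnf_size entries is a (data, address) pair of NVM
	 * words. An entry whose address is IGPHY_PAGE_SELECT records the
	 * new PHY page, which is then folded into the register addresses
	 * of the entries that follow.
	 */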
   4844 	for (i = 0; i < cnf_size; i++) {
   4845 		uint16_t reg_data, reg_addr;
   4846 
   4847 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4848 			goto release;
   4849 
    4850 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4851 			goto release;
   4852 
   4853 		if (reg_addr == IGPHY_PAGE_SELECT)
   4854 			phy_page = reg_data;
   4855 
   4856 		reg_addr &= IGPHY_MAXREGADDR;
   4857 		reg_addr |= phy_page;
   4858 
   4859 		KASSERT(sc->phy.writereg_locked != NULL);
   4860 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4861 		    reg_data);
   4862 	}
   4863 
   4864 release:
   4865 	sc->phy.release(sc);
   4866 	return rv;
   4867 }
   4868 
   4869 /*
   4870  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4871  *  @sc:       pointer to the HW structure
   4872  *  @d0_state: boolean if entering d0 or d3 device state
   4873  *
   4874  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4875  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4876  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4877  */
   4878 int
   4879 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4880 {
   4881 	uint32_t mac_reg;
   4882 	uint16_t oem_reg;
   4883 	int rv;
   4884 
   4885 	if (sc->sc_type < WM_T_PCH)
   4886 		return 0;
   4887 
   4888 	rv = sc->phy.acquire(sc);
   4889 	if (rv != 0)
   4890 		return rv;
   4891 
   4892 	if (sc->sc_type == WM_T_PCH) {
   4893 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4894 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4895 			goto release;
   4896 	}
   4897 
   4898 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4899 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4900 		goto release;
   4901 
   4902 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4903 
   4904 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4905 	if (rv != 0)
   4906 		goto release;
   4907 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4908 
   4909 	if (d0_state) {
   4910 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4911 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4912 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4913 			oem_reg |= HV_OEM_BITS_LPLU;
   4914 	} else {
   4915 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4916 		    != 0)
   4917 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4918 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4919 		    != 0)
   4920 			oem_reg |= HV_OEM_BITS_LPLU;
   4921 	}
   4922 
   4923 	/* Set Restart auto-neg to activate the bits */
   4924 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4925 	    && (wm_phy_resetisblocked(sc) == false))
   4926 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4927 
   4928 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4929 
   4930 release:
   4931 	sc->phy.release(sc);
   4932 
   4933 	return rv;
   4934 }
   4935 
   4936 /* Init hardware bits */
   4937 void
   4938 wm_initialize_hardware_bits(struct wm_softc *sc)
   4939 {
   4940 	uint32_t tarc0, tarc1, reg;
   4941 
   4942 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4943 		device_xname(sc->sc_dev), __func__));
   4944 
   4945 	/* For 82571 variant, 80003 and ICHs */
   4946 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4947 	    || (sc->sc_type >= WM_T_80003)) {
   4948 
   4949 		/* Transmit Descriptor Control 0 */
   4950 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4951 		reg |= TXDCTL_COUNT_DESC;
   4952 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4953 
   4954 		/* Transmit Descriptor Control 1 */
   4955 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4956 		reg |= TXDCTL_COUNT_DESC;
   4957 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4958 
   4959 		/* TARC0 */
   4960 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4961 		switch (sc->sc_type) {
   4962 		case WM_T_82571:
   4963 		case WM_T_82572:
   4964 		case WM_T_82573:
   4965 		case WM_T_82574:
   4966 		case WM_T_82583:
   4967 		case WM_T_80003:
   4968 			/* Clear bits 30..27 */
   4969 			tarc0 &= ~__BITS(30, 27);
   4970 			break;
   4971 		default:
   4972 			break;
   4973 		}
   4974 
   4975 		switch (sc->sc_type) {
   4976 		case WM_T_82571:
   4977 		case WM_T_82572:
   4978 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4979 
   4980 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4981 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4982 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4983 			/* 8257[12] Errata No.7 */
    4984 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4985 
   4986 			/* TARC1 bit 28 */
   4987 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4988 				tarc1 &= ~__BIT(28);
   4989 			else
   4990 				tarc1 |= __BIT(28);
   4991 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4992 
   4993 			/*
   4994 			 * 8257[12] Errata No.13
    4995 			 * Disable Dynamic Clock Gating.
   4996 			 */
   4997 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4998 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4999 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5000 			break;
   5001 		case WM_T_82573:
   5002 		case WM_T_82574:
   5003 		case WM_T_82583:
   5004 			if ((sc->sc_type == WM_T_82574)
   5005 			    || (sc->sc_type == WM_T_82583))
   5006 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5007 
   5008 			/* Extended Device Control */
   5009 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5010 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5011 			reg |= __BIT(22);	/* Set bit 22 */
   5012 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5013 
   5014 			/* Device Control */
   5015 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5016 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5017 
   5018 			/* PCIe Control Register */
   5019 			/*
   5020 			 * 82573 Errata (unknown).
   5021 			 *
   5022 			 * 82574 Errata 25 and 82583 Errata 12
   5023 			 * "Dropped Rx Packets":
    5024 			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   5025 			 */
   5026 			reg = CSR_READ(sc, WMREG_GCR);
   5027 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5028 			CSR_WRITE(sc, WMREG_GCR, reg);
   5029 
   5030 			if ((sc->sc_type == WM_T_82574)
   5031 			    || (sc->sc_type == WM_T_82583)) {
   5032 				/*
   5033 				 * Document says this bit must be set for
   5034 				 * proper operation.
   5035 				 */
   5036 				reg = CSR_READ(sc, WMREG_GCR);
   5037 				reg |= __BIT(22);
   5038 				CSR_WRITE(sc, WMREG_GCR, reg);
   5039 
   5040 				/*
    5041 				 * Apply a workaround for a hardware erratum
    5042 				 * documented in the errata docs. It fixes an
    5043 				 * issue where error-prone or unreliable PCIe
    5044 				 * completions occur, particularly with ASPM
    5045 				 * enabled. Without the fix, the issue can
    5046 				 * cause Tx timeouts.
   5047 				 */
   5048 				reg = CSR_READ(sc, WMREG_GCR2);
   5049 				reg |= __BIT(0);
   5050 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5051 			}
   5052 			break;
   5053 		case WM_T_80003:
   5054 			/* TARC0 */
   5055 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5056 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5057 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5058 
   5059 			/* TARC1 bit 28 */
   5060 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5061 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5062 				tarc1 &= ~__BIT(28);
   5063 			else
   5064 				tarc1 |= __BIT(28);
   5065 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5066 			break;
   5067 		case WM_T_ICH8:
   5068 		case WM_T_ICH9:
   5069 		case WM_T_ICH10:
   5070 		case WM_T_PCH:
   5071 		case WM_T_PCH2:
   5072 		case WM_T_PCH_LPT:
   5073 		case WM_T_PCH_SPT:
   5074 		case WM_T_PCH_CNP:
   5075 			/* TARC0 */
   5076 			if (sc->sc_type == WM_T_ICH8) {
   5077 				/* Set TARC0 bits 29 and 28 */
   5078 				tarc0 |= __BITS(29, 28);
   5079 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5080 				tarc0 |= __BIT(29);
   5081 				/*
   5082 				 *  Drop bit 28. From Linux.
   5083 				 * See I218/I219 spec update
   5084 				 * "5. Buffer Overrun While the I219 is
   5085 				 * Processing DMA Transactions"
   5086 				 */
   5087 				tarc0 &= ~__BIT(28);
   5088 			}
   5089 			/* Set TARC0 bits 23,24,26,27 */
   5090 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5091 
   5092 			/* CTRL_EXT */
   5093 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5094 			reg |= __BIT(22);	/* Set bit 22 */
   5095 			/*
   5096 			 * Enable PHY low-power state when MAC is at D3
   5097 			 * w/o WoL
   5098 			 */
   5099 			if (sc->sc_type >= WM_T_PCH)
   5100 				reg |= CTRL_EXT_PHYPDEN;
   5101 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5102 
   5103 			/* TARC1 */
   5104 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5105 			/* bit 28 */
   5106 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5107 				tarc1 &= ~__BIT(28);
   5108 			else
   5109 				tarc1 |= __BIT(28);
   5110 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5111 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5112 
   5113 			/* Device Status */
   5114 			if (sc->sc_type == WM_T_ICH8) {
   5115 				reg = CSR_READ(sc, WMREG_STATUS);
   5116 				reg &= ~__BIT(31);
   5117 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5118 
   5119 			}
   5120 
   5121 			/* IOSFPC */
   5122 			if (sc->sc_type == WM_T_PCH_SPT) {
   5123 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5124 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5125 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5126 			}
   5127 			/*
    5128 			 * To work around a descriptor data corruption issue
    5129 			 * with NFS v2 UDP traffic, disable the NFS filtering
   5130 			 * capability.
   5131 			 */
   5132 			reg = CSR_READ(sc, WMREG_RFCTL);
   5133 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5134 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5135 			break;
   5136 		default:
   5137 			break;
   5138 		}
   5139 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5140 
   5141 		switch (sc->sc_type) {
   5142 		/*
   5143 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   5144 		 * Avoid RSS Hash Value bug.
   5145 		 */
   5146 		case WM_T_82571:
   5147 		case WM_T_82572:
   5148 		case WM_T_82573:
   5149 		case WM_T_80003:
   5150 		case WM_T_ICH8:
   5151 			reg = CSR_READ(sc, WMREG_RFCTL);
    5152 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5153 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5154 			break;
   5155 		case WM_T_82574:
    5156 			/* Use extended Rx descriptors. */
   5157 			reg = CSR_READ(sc, WMREG_RFCTL);
   5158 			reg |= WMREG_RFCTL_EXSTEN;
   5159 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5160 			break;
   5161 		default:
   5162 			break;
   5163 		}
   5164 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5165 		/*
   5166 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5167 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5168 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5169 		 * Correctly by the Device"
   5170 		 *
   5171 		 * I354(C2000) Errata AVR53:
   5172 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5173 		 * Hang"
   5174 		 */
   5175 		reg = CSR_READ(sc, WMREG_RFCTL);
   5176 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5177 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5178 	}
   5179 }
   5180 
   5181 static uint32_t
   5182 wm_rxpbs_adjust_82580(uint32_t val)
   5183 {
   5184 	uint32_t rv = 0;
   5185 
   5186 	if (val < __arraycount(wm_82580_rxpbs_table))
   5187 		rv = wm_82580_rxpbs_table[val];
   5188 
   5189 	return rv;
   5190 }
   5191 
   5192 /*
   5193  * wm_reset_phy:
   5194  *
   5195  *	generic PHY reset function.
   5196  *	Same as e1000_phy_hw_reset_generic()
   5197  */
   5198 static int
   5199 wm_reset_phy(struct wm_softc *sc)
   5200 {
   5201 	uint32_t reg;
   5202 
   5203 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5204 		device_xname(sc->sc_dev), __func__));
   5205 	if (wm_phy_resetisblocked(sc))
   5206 		return -1;
   5207 
   5208 	sc->phy.acquire(sc);
   5209 
   5210 	reg = CSR_READ(sc, WMREG_CTRL);
   5211 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5212 	CSR_WRITE_FLUSH(sc);
   5213 
   5214 	delay(sc->phy.reset_delay_us);
   5215 
   5216 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5217 	CSR_WRITE_FLUSH(sc);
   5218 
   5219 	delay(150);
   5220 
   5221 	sc->phy.release(sc);
   5222 
   5223 	wm_get_cfg_done(sc);
   5224 	wm_phy_post_reset(sc);
   5225 
   5226 	return 0;
   5227 }
   5228 
   5229 /*
   5230  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5231  *
    5232  * On I219, the descriptor rings must be emptied before resetting the HW
   5233  * or before changing the device state to D3 during runtime (runtime PM).
   5234  *
   5235  * Failure to do this will cause the HW to enter a unit hang state which can
   5236  * only be released by PCI reset on the device.
   5237  *
   5238  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5239  */
   5240 static void
   5241 wm_flush_desc_rings(struct wm_softc *sc)
   5242 {
   5243 	pcireg_t preg;
   5244 	uint32_t reg;
   5245 	struct wm_txqueue *txq;
   5246 	wiseman_txdesc_t *txd;
   5247 	int nexttx;
   5248 	uint32_t rctl;
   5249 
   5250 	/* First, disable MULR fix in FEXTNVM11 */
   5251 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5252 	reg |= FEXTNVM11_DIS_MULRFIX;
   5253 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5254 
   5255 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5256 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5257 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5258 		return;
   5259 
   5260 	/*
   5261 	 * Remove all descriptors from the tx_ring.
   5262 	 *
    5263 	 * We want to clear all pending descriptors from the TX ring. They
    5264 	 * are consumed when the HW reads the ring. We assign the ring
    5265 	 * itself as the data of the dummy descriptor; the contents don't
    5266 	 * matter since we are about to reset the HW.
   5267 	 */
   5268 #ifdef WM_DEBUG
   5269 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5270 #endif
   5271 	reg = CSR_READ(sc, WMREG_TCTL);
   5272 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5273 
   5274 	txq = &sc->sc_queue[0].wmq_txq;
   5275 	nexttx = txq->txq_next;
   5276 	txd = &txq->txq_descs[nexttx];
   5277 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5278 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5279 	txd->wtx_fields.wtxu_status = 0;
   5280 	txd->wtx_fields.wtxu_options = 0;
   5281 	txd->wtx_fields.wtxu_vlan = 0;
   5282 
   5283 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5284 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5285 
   5286 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5287 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5288 	CSR_WRITE_FLUSH(sc);
   5289 	delay(250);
   5290 
   5291 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5292 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5293 		return;
   5294 
   5295 	/*
   5296 	 * Mark all descriptors in the RX ring as consumed and disable the
   5297 	 * rx ring.
   5298 	 */
   5299 #ifdef WM_DEBUG
   5300 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5301 #endif
   5302 	rctl = CSR_READ(sc, WMREG_RCTL);
   5303 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5304 	CSR_WRITE_FLUSH(sc);
   5305 	delay(150);
   5306 
   5307 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5308 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5309 	reg &= 0xffffc000;
   5310 	/*
   5311 	 * Update thresholds: prefetch threshold to 31, host threshold
   5312 	 * to 1 and make sure the granularity is "descriptors" and not
   5313 	 * "cache lines"
   5314 	 */
   5315 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5316 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5317 
   5318 	/* Momentarily enable the RX ring for the changes to take effect */
   5319 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5320 	CSR_WRITE_FLUSH(sc);
   5321 	delay(150);
   5322 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5323 }
   5324 
   5325 /*
   5326  * wm_reset:
   5327  *
   5328  *	Reset the i82542 chip.
   5329  */
   5330 static void
   5331 wm_reset(struct wm_softc *sc)
   5332 {
   5333 	int phy_reset = 0;
   5334 	int i, error = 0;
   5335 	uint32_t reg;
   5336 	uint16_t kmreg;
   5337 	int rv;
   5338 
   5339 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5340 		device_xname(sc->sc_dev), __func__));
   5341 	KASSERT(sc->sc_type != 0);
   5342 
   5343 	/*
   5344 	 * Allocate on-chip memory according to the MTU size.
   5345 	 * The Packet Buffer Allocation register must be written
   5346 	 * before the chip is reset.
   5347 	 */
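	/*
	 * For example (illustrative, assuming the PBA_* constants count
	 * KB units): an 82547 with an MTU above 8192 gets a 22KB Rx packet
	 * buffer, leaving 40KB - 22KB = 18KB of on-chip memory for the Tx
	 * FIFO tracked by the txq_fifo_* fields below.
	 */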
   5348 	switch (sc->sc_type) {
   5349 	case WM_T_82547:
   5350 	case WM_T_82547_2:
   5351 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5352 		    PBA_22K : PBA_30K;
   5353 		for (i = 0; i < sc->sc_nqueues; i++) {
   5354 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5355 			txq->txq_fifo_head = 0;
   5356 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5357 			txq->txq_fifo_size =
   5358 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5359 			txq->txq_fifo_stall = 0;
   5360 		}
   5361 		break;
   5362 	case WM_T_82571:
   5363 	case WM_T_82572:
    5364 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5365 	case WM_T_80003:
   5366 		sc->sc_pba = PBA_32K;
   5367 		break;
   5368 	case WM_T_82573:
   5369 		sc->sc_pba = PBA_12K;
   5370 		break;
   5371 	case WM_T_82574:
   5372 	case WM_T_82583:
   5373 		sc->sc_pba = PBA_20K;
   5374 		break;
   5375 	case WM_T_82576:
   5376 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5377 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5378 		break;
   5379 	case WM_T_82580:
   5380 	case WM_T_I350:
   5381 	case WM_T_I354:
   5382 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5383 		break;
   5384 	case WM_T_I210:
   5385 	case WM_T_I211:
   5386 		sc->sc_pba = PBA_34K;
   5387 		break;
   5388 	case WM_T_ICH8:
   5389 		/* Workaround for a bit corruption issue in FIFO memory */
   5390 		sc->sc_pba = PBA_8K;
   5391 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5392 		break;
   5393 	case WM_T_ICH9:
   5394 	case WM_T_ICH10:
   5395 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5396 		    PBA_14K : PBA_10K;
   5397 		break;
   5398 	case WM_T_PCH:
   5399 	case WM_T_PCH2:	/* XXX 14K? */
   5400 	case WM_T_PCH_LPT:
   5401 	case WM_T_PCH_SPT:
   5402 	case WM_T_PCH_CNP:
   5403 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5404 		    PBA_12K : PBA_26K;
   5405 		break;
   5406 	default:
   5407 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5408 		    PBA_40K : PBA_48K;
   5409 		break;
   5410 	}
   5411 	/*
   5412 	 * Only old or non-multiqueue devices have the PBA register
   5413 	 * XXX Need special handling for 82575.
   5414 	 */
   5415 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5416 	    || (sc->sc_type == WM_T_82575))
   5417 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5418 
   5419 	/* Prevent the PCI-E bus from sticking */
   5420 	if (sc->sc_flags & WM_F_PCIE) {
   5421 		int timeout = 800;
   5422 
   5423 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5424 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5425 
   5426 		while (timeout--) {
   5427 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5428 			    == 0)
   5429 				break;
   5430 			delay(100);
   5431 		}
   5432 		if (timeout == 0)
   5433 			device_printf(sc->sc_dev,
   5434 			    "failed to disable bus mastering\n");
   5435 	}
   5436 
   5437 	/* Set the completion timeout for interface */
   5438 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5439 	    || (sc->sc_type == WM_T_82580)
   5440 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5441 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5442 		wm_set_pcie_completion_timeout(sc);
   5443 
   5444 	/* Clear interrupt */
   5445 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5446 	if (wm_is_using_msix(sc)) {
   5447 		if (sc->sc_type != WM_T_82574) {
   5448 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5449 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5450 		} else
   5451 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5452 	}
   5453 
   5454 	/* Stop the transmit and receive processes. */
   5455 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5456 	sc->sc_rctl &= ~RCTL_EN;
   5457 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5458 	CSR_WRITE_FLUSH(sc);
   5459 
   5460 	/* XXX set_tbi_sbp_82543() */
   5461 
   5462 	delay(10*1000);
   5463 
   5464 	/* Must acquire the MDIO ownership before MAC reset */
   5465 	switch (sc->sc_type) {
   5466 	case WM_T_82573:
   5467 	case WM_T_82574:
   5468 	case WM_T_82583:
   5469 		error = wm_get_hw_semaphore_82573(sc);
   5470 		break;
   5471 	default:
   5472 		break;
   5473 	}
   5474 
   5475 	/*
   5476 	 * 82541 Errata 29? & 82547 Errata 28?
   5477 	 * See also the description about PHY_RST bit in CTRL register
   5478 	 * in 8254x_GBe_SDM.pdf.
   5479 	 */
   5480 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5481 		CSR_WRITE(sc, WMREG_CTRL,
   5482 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5483 		CSR_WRITE_FLUSH(sc);
   5484 		delay(5000);
   5485 	}
   5486 
   5487 	switch (sc->sc_type) {
   5488 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5489 	case WM_T_82541:
   5490 	case WM_T_82541_2:
   5491 	case WM_T_82547:
   5492 	case WM_T_82547_2:
   5493 		/*
   5494 		 * On some chipsets, a reset through a memory-mapped write
   5495 		 * cycle can cause the chip to reset before completing the
   5496 		 * write cycle. This causes major headache that can be avoided
    5497 		 * write cycle. This causes major headaches that can be avoided
   5498 		 * I/O space.
   5499 		 *
   5500 		 * So, if we successfully mapped the I/O BAR at attach time,
   5501 		 * use that. Otherwise, try our luck with a memory-mapped
   5502 		 * reset.
   5503 		 */
   5504 		if (sc->sc_flags & WM_F_IOH_VALID)
   5505 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5506 		else
   5507 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5508 		break;
   5509 	case WM_T_82545_3:
   5510 	case WM_T_82546_3:
   5511 		/* Use the shadow control register on these chips. */
   5512 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5513 		break;
   5514 	case WM_T_80003:
   5515 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5516 		sc->phy.acquire(sc);
   5517 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5518 		sc->phy.release(sc);
   5519 		break;
   5520 	case WM_T_ICH8:
   5521 	case WM_T_ICH9:
   5522 	case WM_T_ICH10:
   5523 	case WM_T_PCH:
   5524 	case WM_T_PCH2:
   5525 	case WM_T_PCH_LPT:
   5526 	case WM_T_PCH_SPT:
   5527 	case WM_T_PCH_CNP:
   5528 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5529 		if (wm_phy_resetisblocked(sc) == false) {
   5530 			/*
   5531 			 * Gate automatic PHY configuration by hardware on
   5532 			 * non-managed 82579
   5533 			 */
   5534 			if ((sc->sc_type == WM_T_PCH2)
   5535 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5536 				== 0))
   5537 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5538 
   5539 			reg |= CTRL_PHY_RESET;
   5540 			phy_reset = 1;
   5541 		} else
   5542 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5543 		sc->phy.acquire(sc);
   5544 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5545 		/* Don't insert a completion barrier during reset */
   5546 		delay(20*1000);
   5547 		mutex_exit(sc->sc_ich_phymtx);
   5548 		break;
   5549 	case WM_T_82580:
   5550 	case WM_T_I350:
   5551 	case WM_T_I354:
   5552 	case WM_T_I210:
   5553 	case WM_T_I211:
   5554 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5555 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5556 			CSR_WRITE_FLUSH(sc);
   5557 		delay(5000);
   5558 		break;
   5559 	case WM_T_82542_2_0:
   5560 	case WM_T_82542_2_1:
   5561 	case WM_T_82543:
   5562 	case WM_T_82540:
   5563 	case WM_T_82545:
   5564 	case WM_T_82546:
   5565 	case WM_T_82571:
   5566 	case WM_T_82572:
   5567 	case WM_T_82573:
   5568 	case WM_T_82574:
   5569 	case WM_T_82575:
   5570 	case WM_T_82576:
   5571 	case WM_T_82583:
   5572 	default:
   5573 		/* Everything else can safely use the documented method. */
   5574 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5575 		break;
   5576 	}
   5577 
   5578 	/* Must release the MDIO ownership after MAC reset */
   5579 	switch (sc->sc_type) {
   5580 	case WM_T_82573:
   5581 	case WM_T_82574:
   5582 	case WM_T_82583:
   5583 		if (error == 0)
   5584 			wm_put_hw_semaphore_82573(sc);
   5585 		break;
   5586 	default:
   5587 		break;
   5588 	}
   5589 
   5590 	/* Set Phy Config Counter to 50msec */
   5591 	if (sc->sc_type == WM_T_PCH2) {
   5592 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5593 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5594 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5595 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5596 	}
   5597 
   5598 	if (phy_reset != 0)
   5599 		wm_get_cfg_done(sc);
   5600 
   5601 	/* Reload EEPROM */
   5602 	switch (sc->sc_type) {
   5603 	case WM_T_82542_2_0:
   5604 	case WM_T_82542_2_1:
   5605 	case WM_T_82543:
   5606 	case WM_T_82544:
   5607 		delay(10);
   5608 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5609 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5610 		CSR_WRITE_FLUSH(sc);
   5611 		delay(2000);
   5612 		break;
   5613 	case WM_T_82540:
   5614 	case WM_T_82545:
   5615 	case WM_T_82545_3:
   5616 	case WM_T_82546:
   5617 	case WM_T_82546_3:
   5618 		delay(5*1000);
   5619 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5620 		break;
   5621 	case WM_T_82541:
   5622 	case WM_T_82541_2:
   5623 	case WM_T_82547:
   5624 	case WM_T_82547_2:
   5625 		delay(20000);
   5626 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5627 		break;
   5628 	case WM_T_82571:
   5629 	case WM_T_82572:
   5630 	case WM_T_82573:
   5631 	case WM_T_82574:
   5632 	case WM_T_82583:
   5633 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5634 			delay(10);
   5635 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5636 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5637 			CSR_WRITE_FLUSH(sc);
   5638 		}
   5639 		/* check EECD_EE_AUTORD */
   5640 		wm_get_auto_rd_done(sc);
   5641 		/*
    5642 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5643 		 * is set.
   5644 		 */
   5645 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5646 		    || (sc->sc_type == WM_T_82583))
   5647 			delay(25*1000);
   5648 		break;
   5649 	case WM_T_82575:
   5650 	case WM_T_82576:
   5651 	case WM_T_82580:
   5652 	case WM_T_I350:
   5653 	case WM_T_I354:
   5654 	case WM_T_I210:
   5655 	case WM_T_I211:
   5656 	case WM_T_80003:
   5657 		/* check EECD_EE_AUTORD */
   5658 		wm_get_auto_rd_done(sc);
   5659 		break;
   5660 	case WM_T_ICH8:
   5661 	case WM_T_ICH9:
   5662 	case WM_T_ICH10:
   5663 	case WM_T_PCH:
   5664 	case WM_T_PCH2:
   5665 	case WM_T_PCH_LPT:
   5666 	case WM_T_PCH_SPT:
   5667 	case WM_T_PCH_CNP:
   5668 		break;
   5669 	default:
   5670 		panic("%s: unknown type\n", __func__);
   5671 	}
   5672 
   5673 	/* Check whether EEPROM is present or not */
   5674 	switch (sc->sc_type) {
   5675 	case WM_T_82575:
   5676 	case WM_T_82576:
   5677 	case WM_T_82580:
   5678 	case WM_T_I350:
   5679 	case WM_T_I354:
   5680 	case WM_T_ICH8:
   5681 	case WM_T_ICH9:
   5682 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5683 			/* Not found */
   5684 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5685 			if (sc->sc_type == WM_T_82575)
   5686 				wm_reset_init_script_82575(sc);
   5687 		}
   5688 		break;
   5689 	default:
   5690 		break;
   5691 	}
   5692 
   5693 	if (phy_reset != 0)
   5694 		wm_phy_post_reset(sc);
   5695 
   5696 	if ((sc->sc_type == WM_T_82580)
   5697 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5698 		/* Clear global device reset status bit */
   5699 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5700 	}
   5701 
   5702 	/* Clear any pending interrupt events. */
   5703 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5704 	reg = CSR_READ(sc, WMREG_ICR);
   5705 	if (wm_is_using_msix(sc)) {
   5706 		if (sc->sc_type != WM_T_82574) {
   5707 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5708 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5709 		} else
   5710 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5711 	}
   5712 
   5713 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5714 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5715 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5716 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5717 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5718 		reg |= KABGTXD_BGSQLBIAS;
   5719 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5720 	}
   5721 
   5722 	/* Reload sc_ctrl */
   5723 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5724 
   5725 	wm_set_eee(sc);
   5726 
   5727 	/*
   5728 	 * For PCH, this write will make sure that any noise will be detected
    5729 	 * For PCH, this write will make sure that any noise is detected
    5730 	 * as a CRC error and dropped rather than showing up as a bad packet
   5731 	 */
   5732 	if (sc->sc_type == WM_T_PCH)
   5733 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5734 
   5735 	if (sc->sc_type >= WM_T_82544)
   5736 		CSR_WRITE(sc, WMREG_WUC, 0);
   5737 
   5738 	if (sc->sc_type < WM_T_82575)
   5739 		wm_disable_aspm(sc); /* Workaround for some chips */
   5740 
   5741 	wm_reset_mdicnfg_82580(sc);
   5742 
   5743 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5744 		wm_pll_workaround_i210(sc);
   5745 
   5746 	if (sc->sc_type == WM_T_80003) {
   5747 		/* Default to TRUE to enable the MDIC W/A */
   5748 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5749 
   5750 		rv = wm_kmrn_readreg(sc,
   5751 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5752 		if (rv == 0) {
   5753 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5754 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5755 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5756 			else
   5757 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5758 		}
   5759 	}
   5760 }
   5761 
   5762 /*
   5763  * wm_add_rxbuf:
   5764  *
    5765  *	Add a receive buffer to the indicated descriptor.
   5766  */
   5767 static int
   5768 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5769 {
   5770 	struct wm_softc *sc = rxq->rxq_sc;
   5771 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5772 	struct mbuf *m;
   5773 	int error;
   5774 
   5775 	KASSERT(mutex_owned(rxq->rxq_lock));
   5776 
   5777 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5778 	if (m == NULL)
   5779 		return ENOBUFS;
   5780 
   5781 	MCLGET(m, M_DONTWAIT);
   5782 	if ((m->m_flags & M_EXT) == 0) {
   5783 		m_freem(m);
   5784 		return ENOBUFS;
   5785 	}
   5786 
   5787 	if (rxs->rxs_mbuf != NULL)
   5788 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5789 
   5790 	rxs->rxs_mbuf = m;
   5791 
   5792 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5793 	/*
   5794 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5795 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5796 	 */
   5797 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5798 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5799 	if (error) {
   5800 		/* XXX XXX XXX */
   5801 		aprint_error_dev(sc->sc_dev,
   5802 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5803 		panic("wm_add_rxbuf");
   5804 	}
   5805 
   5806 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5807 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5808 
   5809 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5810 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5811 			wm_init_rxdesc(rxq, idx);
   5812 	} else
   5813 		wm_init_rxdesc(rxq, idx);
   5814 
   5815 	return 0;
   5816 }
   5817 
   5818 /*
   5819  * wm_rxdrain:
   5820  *
   5821  *	Drain the receive queue.
   5822  */
   5823 static void
   5824 wm_rxdrain(struct wm_rxqueue *rxq)
   5825 {
   5826 	struct wm_softc *sc = rxq->rxq_sc;
   5827 	struct wm_rxsoft *rxs;
   5828 	int i;
   5829 
   5830 	KASSERT(mutex_owned(rxq->rxq_lock));
   5831 
   5832 	for (i = 0; i < WM_NRXDESC; i++) {
   5833 		rxs = &rxq->rxq_soft[i];
   5834 		if (rxs->rxs_mbuf != NULL) {
   5835 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5836 			m_freem(rxs->rxs_mbuf);
   5837 			rxs->rxs_mbuf = NULL;
   5838 		}
   5839 	}
   5840 }
   5841 
   5842 /*
   5843  * Setup registers for RSS.
    5844  * Set up the registers for RSS.
    5845  *
    5846  * XXX VMDq is not supported yet.
   5847 static void
   5848 wm_init_rss(struct wm_softc *sc)
   5849 {
   5850 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5851 	int i;
   5852 
   5853 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5854 
   5855 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5856 		unsigned int qid, reta_ent;
   5857 
   5858 		qid  = i % sc->sc_nqueues;
   5859 		switch (sc->sc_type) {
   5860 		case WM_T_82574:
   5861 			reta_ent = __SHIFTIN(qid,
   5862 			    RETA_ENT_QINDEX_MASK_82574);
   5863 			break;
   5864 		case WM_T_82575:
   5865 			reta_ent = __SHIFTIN(qid,
   5866 			    RETA_ENT_QINDEX1_MASK_82575);
   5867 			break;
   5868 		default:
   5869 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5870 			break;
   5871 		}
   5872 
   5873 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5874 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5875 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5876 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5877 	}
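
	/*
	 * Example (illustrative): with sc_nqueues = 4, the RETA entries
	 * are filled round-robin as 0, 1, 2, 3, 0, 1, ... so RSS hash
	 * values spread evenly across the four queues.
	 */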
   5878 
   5879 	rss_getkey((uint8_t *)rss_key);
   5880 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5881 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5882 
   5883 	if (sc->sc_type == WM_T_82574)
   5884 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5885 	else
   5886 		mrqc = MRQC_ENABLE_RSS_MQ;
   5887 
   5888 	/*
   5889 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5890 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5891 	 */
   5892 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5893 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5894 #if 0
   5895 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5896 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5897 #endif
   5898 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5899 
   5900 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5901 }
   5902 
   5903 /*
    5904  * Adjust the TX and RX queue numbers which the system actually uses.
    5905  *
    5906  * The numbers are affected by the parameters below.
    5907  *     - The number of hardware queues
   5908  *     - The number of MSI-X vectors (= "nvectors" argument)
   5909  *     - ncpu
   5910  */
   5911 static void
   5912 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5913 {
   5914 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5915 
   5916 	if (nvectors < 2) {
   5917 		sc->sc_nqueues = 1;
   5918 		return;
   5919 	}
   5920 
   5921 	switch (sc->sc_type) {
   5922 	case WM_T_82572:
   5923 		hw_ntxqueues = 2;
   5924 		hw_nrxqueues = 2;
   5925 		break;
   5926 	case WM_T_82574:
   5927 		hw_ntxqueues = 2;
   5928 		hw_nrxqueues = 2;
   5929 		break;
   5930 	case WM_T_82575:
   5931 		hw_ntxqueues = 4;
   5932 		hw_nrxqueues = 4;
   5933 		break;
   5934 	case WM_T_82576:
   5935 		hw_ntxqueues = 16;
   5936 		hw_nrxqueues = 16;
   5937 		break;
   5938 	case WM_T_82580:
   5939 	case WM_T_I350:
   5940 	case WM_T_I354:
   5941 		hw_ntxqueues = 8;
   5942 		hw_nrxqueues = 8;
   5943 		break;
   5944 	case WM_T_I210:
   5945 		hw_ntxqueues = 4;
   5946 		hw_nrxqueues = 4;
   5947 		break;
   5948 	case WM_T_I211:
   5949 		hw_ntxqueues = 2;
   5950 		hw_nrxqueues = 2;
   5951 		break;
   5952 		/*
    5953 		 * The following Ethernet controllers do not support MSI-X;
   5954 		 * this driver doesn't let them use multiqueue.
   5955 		 *     - WM_T_80003
   5956 		 *     - WM_T_ICH8
   5957 		 *     - WM_T_ICH9
   5958 		 *     - WM_T_ICH10
   5959 		 *     - WM_T_PCH
   5960 		 *     - WM_T_PCH2
   5961 		 *     - WM_T_PCH_LPT
   5962 		 */
   5963 	default:
   5964 		hw_ntxqueues = 1;
   5965 		hw_nrxqueues = 1;
   5966 		break;
   5967 	}
   5968 
   5969 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5970 
   5971 	/*
    5972 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5973 	 * limit the number of queues actually used.
   5974 	 */
   5975 	if (nvectors < hw_nqueues + 1)
   5976 		sc->sc_nqueues = nvectors - 1;
   5977 	else
   5978 		sc->sc_nqueues = hw_nqueues;
   5979 
   5980 	/*
    5981 	 * Since more queues than CPUs cannot improve scaling, we limit
    5982 	 * the number of queues actually used.
   5983 	 */
   5984 	if (ncpu < sc->sc_nqueues)
   5985 		sc->sc_nqueues = ncpu;
   5986 }
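
/*
 * Worked example (hypothetical configuration): an 82576 provides 16
 * hardware queues, so with nvectors = 5 on a 2-CPU machine the steps
 * above yield hw_nqueues = 16, then nvectors - 1 = 4 (one vector is
 * reserved for the link interrupt), and finally ncpu = 2, so
 * sc_nqueues ends up as 2.
 */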
   5987 
   5988 static inline bool
   5989 wm_is_using_msix(struct wm_softc *sc)
   5990 {
   5991 
   5992 	return (sc->sc_nintrs > 1);
   5993 }
   5994 
   5995 static inline bool
   5996 wm_is_using_multiqueue(struct wm_softc *sc)
   5997 {
   5998 
   5999 	return (sc->sc_nqueues > 1);
   6000 }
   6001 
   6002 static int
   6003 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6004 {
   6005 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6006 
   6007 	wmq->wmq_id = qidx;
   6008 	wmq->wmq_intr_idx = intr_idx;
   6009 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   6010 	    wm_handle_queue, wmq);
   6011 	if (wmq->wmq_si != NULL)
   6012 		return 0;
   6013 
   6014 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6015 	    wmq->wmq_id);
   6016 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6017 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6018 	return ENOMEM;
   6019 }
   6020 
   6021 /*
    6022  * Both single-interrupt MSI and INTx can use this function.
   6023  */
   6024 static int
   6025 wm_setup_legacy(struct wm_softc *sc)
   6026 {
   6027 	pci_chipset_tag_t pc = sc->sc_pc;
   6028 	const char *intrstr = NULL;
   6029 	char intrbuf[PCI_INTRSTR_LEN];
   6030 	int error;
   6031 
   6032 	error = wm_alloc_txrx_queues(sc);
   6033 	if (error) {
   6034 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6035 		    error);
   6036 		return ENOMEM;
   6037 	}
   6038 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6039 	    sizeof(intrbuf));
   6040 #ifdef WM_MPSAFE
   6041 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6042 #endif
   6043 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6044 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6045 	if (sc->sc_ihs[0] == NULL) {
   6046 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6047 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6048 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6049 		return ENOMEM;
   6050 	}
   6051 
   6052 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6053 	sc->sc_nintrs = 1;
   6054 
   6055 	return wm_softint_establish_queue(sc, 0, 0);
   6056 }
   6057 
   6058 static int
   6059 wm_setup_msix(struct wm_softc *sc)
   6060 {
   6061 	void *vih;
   6062 	kcpuset_t *affinity;
   6063 	int qidx, error, intr_idx, txrx_established;
   6064 	pci_chipset_tag_t pc = sc->sc_pc;
   6065 	const char *intrstr = NULL;
   6066 	char intrbuf[PCI_INTRSTR_LEN];
   6067 	char intr_xname[INTRDEVNAMEBUF];
   6068 
   6069 	if (sc->sc_nqueues < ncpu) {
   6070 		/*
    6071 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6072 		 * interrupts starts at CPU#1.
   6073 		 */
   6074 		sc->sc_affinity_offset = 1;
   6075 	} else {
   6076 		/*
    6077 		 * In this case, this device uses all CPUs, so we align each
    6078 		 * affinity's cpu_index with its MSI-X vector number for readability.
   6079 		 */
   6080 		sc->sc_affinity_offset = 0;
   6081 	}
   6082 
   6083 	error = wm_alloc_txrx_queues(sc);
   6084 	if (error) {
   6085 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6086 		    error);
   6087 		return ENOMEM;
   6088 	}
   6089 
   6090 	kcpuset_create(&affinity, false);
   6091 	intr_idx = 0;
   6092 
   6093 	/*
   6094 	 * TX and RX
   6095 	 */
   6096 	txrx_established = 0;
   6097 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6098 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6099 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6100 
   6101 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6102 		    sizeof(intrbuf));
   6103 #ifdef WM_MPSAFE
   6104 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6105 		    PCI_INTR_MPSAFE, true);
   6106 #endif
   6107 		memset(intr_xname, 0, sizeof(intr_xname));
   6108 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6109 		    device_xname(sc->sc_dev), qidx);
   6110 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6111 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6112 		if (vih == NULL) {
   6113 			aprint_error_dev(sc->sc_dev,
   6114 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6115 			    intrstr ? " at " : "",
   6116 			    intrstr ? intrstr : "");
   6117 
   6118 			goto fail;
   6119 		}
   6120 		kcpuset_zero(affinity);
   6121 		/* Round-robin affinity */
   6122 		kcpuset_set(affinity, affinity_to);
   6123 		error = interrupt_distribute(vih, affinity, NULL);
   6124 		if (error == 0) {
   6125 			aprint_normal_dev(sc->sc_dev,
   6126 			    "for TX and RX interrupting at %s affinity to %u\n",
   6127 			    intrstr, affinity_to);
   6128 		} else {
   6129 			aprint_normal_dev(sc->sc_dev,
   6130 			    "for TX and RX interrupting at %s\n", intrstr);
   6131 		}
   6132 		sc->sc_ihs[intr_idx] = vih;
   6133 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6134 			goto fail;
   6135 		txrx_established++;
   6136 		intr_idx++;
   6137 	}
   6138 
   6139 	/* LINK */
   6140 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6141 	    sizeof(intrbuf));
   6142 #ifdef WM_MPSAFE
   6143 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6144 #endif
   6145 	memset(intr_xname, 0, sizeof(intr_xname));
   6146 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6147 	    device_xname(sc->sc_dev));
   6148 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6149 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6150 	if (vih == NULL) {
   6151 		aprint_error_dev(sc->sc_dev,
   6152 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6153 		    intrstr ? " at " : "",
   6154 		    intrstr ? intrstr : "");
   6155 
   6156 		goto fail;
   6157 	}
   6158 	/* Keep default affinity to LINK interrupt */
   6159 	aprint_normal_dev(sc->sc_dev,
   6160 	    "for LINK interrupting at %s\n", intrstr);
   6161 	sc->sc_ihs[intr_idx] = vih;
   6162 	sc->sc_link_intr_idx = intr_idx;
   6163 
   6164 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6165 	kcpuset_destroy(affinity);
   6166 	return 0;
   6167 
   6168  fail:
   6169 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6170 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6171 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6172 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6173 	}
   6174 
   6175 	kcpuset_destroy(affinity);
   6176 	return ENOMEM;
   6177 }
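
/*
 * Worked example (hypothetical configuration): with sc_nqueues = 4 and
 * ncpu = 8, sc_affinity_offset is 1, so the TXRX0..TXRX3 vectors are
 * distributed round-robin to CPU1..CPU4 while the LINK vector keeps
 * the system default affinity.
 */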
   6178 
   6179 static void
   6180 wm_unset_stopping_flags(struct wm_softc *sc)
   6181 {
   6182 	int i;
   6183 
   6184 	KASSERT(WM_CORE_LOCKED(sc));
   6185 
   6186 	/* Must unset stopping flags in ascending order. */
   6187 	for (i = 0; i < sc->sc_nqueues; i++) {
   6188 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6189 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6190 
   6191 		mutex_enter(txq->txq_lock);
   6192 		txq->txq_stopping = false;
   6193 		mutex_exit(txq->txq_lock);
   6194 
   6195 		mutex_enter(rxq->rxq_lock);
   6196 		rxq->rxq_stopping = false;
   6197 		mutex_exit(rxq->rxq_lock);
   6198 	}
   6199 
   6200 	sc->sc_core_stopping = false;
   6201 }
   6202 
   6203 static void
   6204 wm_set_stopping_flags(struct wm_softc *sc)
   6205 {
   6206 	int i;
   6207 
   6208 	KASSERT(WM_CORE_LOCKED(sc));
   6209 
   6210 	sc->sc_core_stopping = true;
   6211 
   6212 	/* Must set stopping flags in ascending order. */
   6213 	for (i = 0; i < sc->sc_nqueues; i++) {
   6214 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6215 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6216 
   6217 		mutex_enter(rxq->rxq_lock);
   6218 		rxq->rxq_stopping = true;
   6219 		mutex_exit(rxq->rxq_lock);
   6220 
   6221 		mutex_enter(txq->txq_lock);
   6222 		txq->txq_stopping = true;
   6223 		mutex_exit(txq->txq_lock);
   6224 	}
   6225 }
   6226 
   6227 /*
    6228  * Write the interrupt interval value to the ITR or EITR register
   6229  */
   6230 static void
   6231 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6232 {
   6233 
   6234 	if (!wmq->wmq_set_itr)
   6235 		return;
   6236 
   6237 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6238 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6239 
   6240 		/*
    6241 		 * The 82575 doesn't have the CNT_INGR field,
    6242 		 * so overwrite the counter field in software.
   6243 		 */
   6244 		if (sc->sc_type == WM_T_82575)
   6245 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   6246 		else
   6247 			eitr |= EITR_CNT_INGR;
   6248 
   6249 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6250 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6251 		/*
    6252 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6253 		 * the multiqueue function with MSI-X.
   6254 		 */
   6255 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6256 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6257 	} else {
   6258 		KASSERT(wmq->wmq_id == 0);
   6259 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6260 	}
   6261 
   6262 	wmq->wmq_set_itr = false;
   6263 }
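
/*
 * Worked example: the legacy ITR register counts in units of 256ns, so
 * a target of N interrupts/sec corresponds to ITR = 10^9 / (N * 256).
 * The default sc_itr_init of 1500 set in wm_init_locked() therefore
 * yields 10^9 / (1500 * 256) ~= 2604 interrupts/sec.
 */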
   6264 
   6265 /*
   6266  * TODO
    6267  * The dynamic ITR calculation below is almost the same as Linux igb's;
    6268  * however, it does not fit wm(4), so AIM remains disabled until we find
    6269  * an appropriate ITR calculation for this driver.
   6270  */
   6271 /*
    6272  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6273  * write to the register; this function does not write ITR/EITR itself.
   6274  */
   6275 static void
   6276 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6277 {
   6278 #ifdef NOTYET
   6279 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6280 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6281 	uint32_t avg_size = 0;
   6282 	uint32_t new_itr;
   6283 
   6284 	if (rxq->rxq_packets)
   6285 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6286 	if (txq->txq_packets)
   6287 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6288 
   6289 	if (avg_size == 0) {
   6290 		new_itr = 450; /* restore default value */
   6291 		goto out;
   6292 	}
   6293 
   6294 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6295 	avg_size += 24;
   6296 
   6297 	/* Don't starve jumbo frames */
   6298 	avg_size = uimin(avg_size, 3000);
   6299 
   6300 	/* Give a little boost to mid-size frames */
   6301 	if ((avg_size > 300) && (avg_size < 1200))
   6302 		new_itr = avg_size / 3;
   6303 	else
   6304 		new_itr = avg_size / 2;
   6305 
   6306 out:
   6307 	/*
    6308 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   6309 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6310 	 */
   6311 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6312 		new_itr *= 4;
   6313 
   6314 	if (new_itr != wmq->wmq_itr) {
   6315 		wmq->wmq_itr = new_itr;
   6316 		wmq->wmq_set_itr = true;
   6317 	} else
   6318 		wmq->wmq_set_itr = false;
   6319 
   6320 	rxq->rxq_packets = 0;
   6321 	rxq->rxq_bytes = 0;
   6322 	txq->txq_packets = 0;
   6323 	txq->txq_bytes = 0;
   6324 #endif
   6325 }
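
/*
 * Worked example for the disabled calculation above (hypothetical
 * numbers): an average frame of 1000 bytes becomes 1024 after the
 * 24-byte CRC/preamble/gap adjustment, falls in the mid-size range
 * (300, 1200), and gives new_itr = 1024 / 3 = 341, which is then
 * multiplied by 4 on every controller except the 82575.
 */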
   6326 
   6327 static void
   6328 wm_init_sysctls(struct wm_softc *sc)
   6329 {
   6330 	struct sysctllog **log;
   6331 	const struct sysctlnode *rnode, *qnode, *cnode;
   6332 	int i, rv;
   6333 	const char *dvname;
   6334 
   6335 	log = &sc->sc_sysctllog;
   6336 	dvname = device_xname(sc->sc_dev);
   6337 
   6338 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6339 	    0, CTLTYPE_NODE, dvname,
   6340 	    SYSCTL_DESCR("wm information and settings"),
   6341 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6342 	if (rv != 0)
   6343 		goto err;
   6344 
   6345 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6346 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   6347 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6348 	if (rv != 0)
   6349 		goto teardown;
   6350 
   6351 	for (i = 0; i < sc->sc_nqueues; i++) {
   6352 		struct wm_queue *wmq = &sc->sc_queue[i];
   6353 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6354 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6355 
   6356 		snprintf(sc->sc_queue[i].sysctlname,
   6357 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6358 
   6359 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6360 		    0, CTLTYPE_NODE,
   6361 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6362 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6363 			break;
   6364 
   6365 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6366 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6367 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6368 		    NULL, 0, &txq->txq_free,
   6369 		    0, CTL_CREATE, CTL_EOL) != 0)
   6370 			break;
   6371 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6372 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6373 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6374 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6375 		    0, CTL_CREATE, CTL_EOL) != 0)
   6376 			break;
   6377 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6378 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6379 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6380 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6381 		    0, CTL_CREATE, CTL_EOL) != 0)
   6382 			break;
   6383 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6384 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6385 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6386 		    NULL, 0, &txq->txq_next,
   6387 		    0, CTL_CREATE, CTL_EOL) != 0)
   6388 			break;
   6389 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6390 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6391 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6392 		    NULL, 0, &txq->txq_sfree,
   6393 		    0, CTL_CREATE, CTL_EOL) != 0)
   6394 			break;
   6395 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6396 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6397 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6398 		    NULL, 0, &txq->txq_snext,
   6399 		    0, CTL_CREATE, CTL_EOL) != 0)
   6400 			break;
   6401 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6402 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6403 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6404 		    NULL, 0, &txq->txq_sdirty,
   6405 		    0, CTL_CREATE, CTL_EOL) != 0)
   6406 			break;
   6407 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6408 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6409 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6410 		    NULL, 0, &txq->txq_flags,
   6411 		    0, CTL_CREATE, CTL_EOL) != 0)
   6412 			break;
   6413 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6414 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6415 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6416 		    NULL, 0, &txq->txq_stopping,
   6417 		    0, CTL_CREATE, CTL_EOL) != 0)
   6418 			break;
   6419 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6420 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6421 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6422 		    NULL, 0, &txq->txq_sending,
   6423 		    0, CTL_CREATE, CTL_EOL) != 0)
   6424 			break;
   6425 
   6426 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6427 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6428 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6429 		    NULL, 0, &rxq->rxq_ptr,
   6430 		    0, CTL_CREATE, CTL_EOL) != 0)
   6431 			break;
   6432 	}
   6433 
   6434 #ifdef WM_DEBUG
   6435 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6436 	    CTLTYPE_INT, "debug_flags",
   6437 	    SYSCTL_DESCR(
   6438 		    "Debug flags:\n"	\
   6439 		    "\t0x01 LINK\n"	\
   6440 		    "\t0x02 TX\n"	\
   6441 		    "\t0x04 RX\n"	\
   6442 		    "\t0x08 GMII\n"	\
   6443 		    "\t0x10 MANAGE\n"	\
   6444 		    "\t0x20 NVM\n"	\
   6445 		    "\t0x40 INIT\n"	\
   6446 		    "\t0x80 LOCK"),
   6447 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6448 	if (rv != 0)
   6449 		goto teardown;
   6450 #endif
   6451 
   6452 	return;
   6453 
   6454 teardown:
   6455 	sysctl_teardown(log);
   6456 err:
   6457 	sc->sc_sysctllog = NULL;
   6458 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6459 	    __func__, rv);
   6460 }
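
/*
 * Usage sketch (assuming the device attaches as wm0): the nodes created
 * above can be inspected and tuned at run time, e.g.:
 *
 *	sysctl hw.wm0.q0.txq_free
 *	sysctl -w hw.wm0.txrx_workqueue=1
 */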
   6461 
   6462 /*
   6463  * wm_init:		[ifnet interface function]
   6464  *
   6465  *	Initialize the interface.
   6466  */
   6467 static int
   6468 wm_init(struct ifnet *ifp)
   6469 {
   6470 	struct wm_softc *sc = ifp->if_softc;
   6471 	int ret;
   6472 
   6473 	WM_CORE_LOCK(sc);
   6474 	ret = wm_init_locked(ifp);
   6475 	WM_CORE_UNLOCK(sc);
   6476 
   6477 	return ret;
   6478 }
   6479 
   6480 static int
   6481 wm_init_locked(struct ifnet *ifp)
   6482 {
   6483 	struct wm_softc *sc = ifp->if_softc;
   6484 	struct ethercom *ec = &sc->sc_ethercom;
   6485 	int i, j, trynum, error = 0;
   6486 	uint32_t reg, sfp_mask = 0;
   6487 
   6488 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6489 		device_xname(sc->sc_dev), __func__));
   6490 	KASSERT(WM_CORE_LOCKED(sc));
   6491 
   6492 	/*
    6493 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6494 	 * There is a small but measurable benefit to avoiding the adjustment
   6495 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6496 	 * on such platforms.  One possibility is that the DMA itself is
   6497 	 * slightly more efficient if the front of the entire packet (instead
   6498 	 * of the front of the headers) is aligned.
   6499 	 *
   6500 	 * Note we must always set align_tweak to 0 if we are using
   6501 	 * jumbo frames.
   6502 	 */
   6503 #ifdef __NO_STRICT_ALIGNMENT
   6504 	sc->sc_align_tweak = 0;
   6505 #else
   6506 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6507 		sc->sc_align_tweak = 0;
   6508 	else
   6509 		sc->sc_align_tweak = 2;
   6510 #endif /* __NO_STRICT_ALIGNMENT */
   6511 
   6512 	/* Cancel any pending I/O. */
   6513 	wm_stop_locked(ifp, false, false);
   6514 
   6515 	/* Update statistics before reset */
   6516 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6517 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6518 
   6519 	/* >= PCH_SPT hardware workaround before reset. */
   6520 	if (sc->sc_type >= WM_T_PCH_SPT)
   6521 		wm_flush_desc_rings(sc);
   6522 
   6523 	/* Reset the chip to a known state. */
   6524 	wm_reset(sc);
   6525 
   6526 	/*
    6527 	 * AMT-based hardware can now take control from firmware.
    6528 	 * Do this after reset.
   6529 	 */
   6530 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6531 		wm_get_hw_control(sc);
   6532 
   6533 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6534 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6535 		wm_legacy_irq_quirk_spt(sc);
   6536 
   6537 	/* Init hardware bits */
   6538 	wm_initialize_hardware_bits(sc);
   6539 
   6540 	/* Reset the PHY. */
   6541 	if (sc->sc_flags & WM_F_HAS_MII)
   6542 		wm_gmii_reset(sc);
   6543 
   6544 	if (sc->sc_type >= WM_T_ICH8) {
   6545 		reg = CSR_READ(sc, WMREG_GCR);
   6546 		/*
    6547 		 * The ICH8's no-snoop bits have the opposite polarity; set to snoop by
   6548 		 * default after reset.
   6549 		 */
   6550 		if (sc->sc_type == WM_T_ICH8)
   6551 			reg |= GCR_NO_SNOOP_ALL;
   6552 		else
   6553 			reg &= ~GCR_NO_SNOOP_ALL;
   6554 		CSR_WRITE(sc, WMREG_GCR, reg);
   6555 	}
   6556 
   6557 	if ((sc->sc_type >= WM_T_ICH8)
   6558 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6559 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6560 
   6561 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6562 		reg |= CTRL_EXT_RO_DIS;
   6563 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6564 	}
   6565 
   6566 	/* Calculate (E)ITR value */
   6567 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6568 		/*
   6569 		 * For NEWQUEUE's EITR (except for 82575).
    6570 		 * The 82575's EITR should be set to the same throttling value as
   6571 		 * old controllers' ITR because the interrupt/sec calculation
   6572 		 * is the same, that is, 1,000,000,000 / (N * 256).
   6573 		 *
    6574 		 * The 82574's EITR should be set to the same throttling value as ITR.
   6575 		 *
   6576 		 * For N interrupts/sec, set this value to:
   6577 		 * 1,000,000 / N in contrast to ITR throttling value.
   6578 		 */
   6579 		sc->sc_itr_init = 450;
   6580 	} else if (sc->sc_type >= WM_T_82543) {
   6581 		/*
   6582 		 * Set up the interrupt throttling register (units of 256ns)
   6583 		 * Note that a footnote in Intel's documentation says this
   6584 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6585 		 * or 10Mbit mode.  Empirically, it appears to be the case
    6586 		 * that this is also true for the 1024ns units of the other
   6587 		 * interrupt-related timer registers -- so, really, we ought
   6588 		 * to divide this value by 4 when the link speed is low.
   6589 		 *
   6590 		 * XXX implement this division at link speed change!
   6591 		 */
   6592 
   6593 		/*
   6594 		 * For N interrupts/sec, set this value to:
   6595 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6596 		 * absolute and packet timer values to this value
   6597 		 * divided by 4 to get "simple timer" behavior.
   6598 		 */
   6599 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6600 	}
   6601 
   6602 	error = wm_init_txrx_queues(sc);
   6603 	if (error)
   6604 		goto out;
   6605 
   6606 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6607 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6608 	    (sc->sc_type >= WM_T_82575))
   6609 		wm_serdes_power_up_link_82575(sc);
   6610 
   6611 	/* Clear out the VLAN table -- we don't use it (yet). */
   6612 	CSR_WRITE(sc, WMREG_VET, 0);
   6613 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6614 		trynum = 10; /* Due to hw errata */
   6615 	else
   6616 		trynum = 1;
   6617 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6618 		for (j = 0; j < trynum; j++)
   6619 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6620 
   6621 	/*
   6622 	 * Set up flow-control parameters.
   6623 	 *
   6624 	 * XXX Values could probably stand some tuning.
   6625 	 */
   6626 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6627 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6628 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6629 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6630 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6631 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6632 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6633 	}
   6634 
   6635 	sc->sc_fcrtl = FCRTL_DFLT;
   6636 	if (sc->sc_type < WM_T_82543) {
   6637 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6638 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6639 	} else {
   6640 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6641 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6642 	}
   6643 
   6644 	if (sc->sc_type == WM_T_80003)
   6645 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6646 	else
   6647 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6648 
    6649 	/* Write the control register. */
   6650 	wm_set_vlan(sc);
   6651 
   6652 	if (sc->sc_flags & WM_F_HAS_MII) {
   6653 		uint16_t kmreg;
   6654 
   6655 		switch (sc->sc_type) {
   6656 		case WM_T_80003:
   6657 		case WM_T_ICH8:
   6658 		case WM_T_ICH9:
   6659 		case WM_T_ICH10:
   6660 		case WM_T_PCH:
   6661 		case WM_T_PCH2:
   6662 		case WM_T_PCH_LPT:
   6663 		case WM_T_PCH_SPT:
   6664 		case WM_T_PCH_CNP:
   6665 			/*
   6666 			 * Set the mac to wait the maximum time between each
   6667 			 * iteration and increase the max iterations when
   6668 			 * polling the phy; this fixes erroneous timeouts at
   6669 			 * 10Mbps.
   6670 			 */
   6671 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6672 			    0xFFFF);
   6673 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6674 			    &kmreg);
   6675 			kmreg |= 0x3F;
   6676 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6677 			    kmreg);
   6678 			break;
   6679 		default:
   6680 			break;
   6681 		}
   6682 
   6683 		if (sc->sc_type == WM_T_80003) {
   6684 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6685 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6686 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6687 
   6688 			/* Bypass RX and TX FIFOs */
   6689 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6690 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6691 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6692 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6693 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6694 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6695 		}
   6696 	}
   6697 #if 0
   6698 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6699 #endif
   6700 
   6701 	/* Set up checksum offload parameters. */
   6702 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6703 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6704 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6705 		reg |= RXCSUM_IPOFL;
   6706 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6707 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6708 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6709 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6710 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6711 
   6712 	/* Set registers about MSI-X */
   6713 	if (wm_is_using_msix(sc)) {
   6714 		uint32_t ivar, qintr_idx;
   6715 		struct wm_queue *wmq;
   6716 		unsigned int qid;
   6717 
   6718 		if (sc->sc_type == WM_T_82575) {
   6719 			/* Interrupt control */
   6720 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6721 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6722 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6723 
   6724 			/* TX and RX */
   6725 			for (i = 0; i < sc->sc_nqueues; i++) {
   6726 				wmq = &sc->sc_queue[i];
   6727 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6728 				    EITR_TX_QUEUE(wmq->wmq_id)
   6729 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6730 			}
   6731 			/* Link status */
   6732 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6733 			    EITR_OTHER);
   6734 		} else if (sc->sc_type == WM_T_82574) {
   6735 			/* Interrupt control */
   6736 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6737 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6738 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6739 
   6740 			/*
   6741 			 * Work around issue with spurious interrupts
   6742 			 * in MSI-X mode.
    6743 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    6744 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6745 			 */
   6746 			reg = CSR_READ(sc, WMREG_RFCTL);
   6747 			reg |= WMREG_RFCTL_ACKDIS;
   6748 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6749 
   6750 			ivar = 0;
   6751 			/* TX and RX */
   6752 			for (i = 0; i < sc->sc_nqueues; i++) {
   6753 				wmq = &sc->sc_queue[i];
   6754 				qid = wmq->wmq_id;
   6755 				qintr_idx = wmq->wmq_intr_idx;
   6756 
   6757 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6758 				    IVAR_TX_MASK_Q_82574(qid));
   6759 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6760 				    IVAR_RX_MASK_Q_82574(qid));
   6761 			}
   6762 			/* Link status */
   6763 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6764 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6765 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6766 		} else {
   6767 			/* Interrupt control */
   6768 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6769 			    | GPIE_EIAME | GPIE_PBA);
   6770 
   6771 			switch (sc->sc_type) {
   6772 			case WM_T_82580:
   6773 			case WM_T_I350:
   6774 			case WM_T_I354:
   6775 			case WM_T_I210:
   6776 			case WM_T_I211:
   6777 				/* TX and RX */
   6778 				for (i = 0; i < sc->sc_nqueues; i++) {
   6779 					wmq = &sc->sc_queue[i];
   6780 					qid = wmq->wmq_id;
   6781 					qintr_idx = wmq->wmq_intr_idx;
   6782 
   6783 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6784 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6785 					ivar |= __SHIFTIN((qintr_idx
   6786 						| IVAR_VALID),
   6787 					    IVAR_TX_MASK_Q(qid));
   6788 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6789 					ivar |= __SHIFTIN((qintr_idx
   6790 						| IVAR_VALID),
   6791 					    IVAR_RX_MASK_Q(qid));
   6792 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6793 				}
   6794 				break;
   6795 			case WM_T_82576:
   6796 				/* TX and RX */
   6797 				for (i = 0; i < sc->sc_nqueues; i++) {
   6798 					wmq = &sc->sc_queue[i];
   6799 					qid = wmq->wmq_id;
   6800 					qintr_idx = wmq->wmq_intr_idx;
   6801 
   6802 					ivar = CSR_READ(sc,
   6803 					    WMREG_IVAR_Q_82576(qid));
   6804 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6805 					ivar |= __SHIFTIN((qintr_idx
   6806 						| IVAR_VALID),
   6807 					    IVAR_TX_MASK_Q_82576(qid));
   6808 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6809 					ivar |= __SHIFTIN((qintr_idx
   6810 						| IVAR_VALID),
   6811 					    IVAR_RX_MASK_Q_82576(qid));
   6812 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6813 					    ivar);
   6814 				}
   6815 				break;
   6816 			default:
   6817 				break;
   6818 			}
   6819 
   6820 			/* Link status */
   6821 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6822 			    IVAR_MISC_OTHER);
   6823 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6824 		}
   6825 
   6826 		if (wm_is_using_multiqueue(sc)) {
   6827 			wm_init_rss(sc);
   6828 
   6829 			/*
    6830 			 * NOTE: Receive Full-Packet Checksum Offload
    6831 			 * is mutually exclusive with Multiqueue; however,
    6832 			 * this is not the same as TCP/IP checksums, which
    6833 			 * still work.
   6834 			*/
   6835 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6836 			reg |= RXCSUM_PCSD;
   6837 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6838 		}
   6839 	}
   6840 
   6841 	/* Set up the interrupt registers. */
   6842 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6843 
   6844 	/* Enable SFP module insertion interrupt if it's required */
   6845 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6846 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6847 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6848 		sfp_mask = ICR_GPI(0);
   6849 	}
   6850 
   6851 	if (wm_is_using_msix(sc)) {
   6852 		uint32_t mask;
   6853 		struct wm_queue *wmq;
   6854 
   6855 		switch (sc->sc_type) {
   6856 		case WM_T_82574:
   6857 			mask = 0;
   6858 			for (i = 0; i < sc->sc_nqueues; i++) {
   6859 				wmq = &sc->sc_queue[i];
   6860 				mask |= ICR_TXQ(wmq->wmq_id);
   6861 				mask |= ICR_RXQ(wmq->wmq_id);
   6862 			}
   6863 			mask |= ICR_OTHER;
   6864 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6865 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6866 			break;
   6867 		default:
   6868 			if (sc->sc_type == WM_T_82575) {
   6869 				mask = 0;
   6870 				for (i = 0; i < sc->sc_nqueues; i++) {
   6871 					wmq = &sc->sc_queue[i];
   6872 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6873 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6874 				}
   6875 				mask |= EITR_OTHER;
   6876 			} else {
   6877 				mask = 0;
   6878 				for (i = 0; i < sc->sc_nqueues; i++) {
   6879 					wmq = &sc->sc_queue[i];
   6880 					mask |= 1 << wmq->wmq_intr_idx;
   6881 				}
   6882 				mask |= 1 << sc->sc_link_intr_idx;
   6883 			}
   6884 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6885 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6886 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6887 
   6888 			/* For other interrupts */
   6889 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6890 			break;
   6891 		}
   6892 	} else {
   6893 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6894 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6895 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6896 	}
   6897 
   6898 	/* Set up the inter-packet gap. */
   6899 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6900 
   6901 	if (sc->sc_type >= WM_T_82543) {
   6902 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6903 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6904 			wm_itrs_writereg(sc, wmq);
   6905 		}
   6906 		/*
    6907 		 * Link interrupts occur much less frequently than TX
    6908 		 * and RX interrupts, so we don't tune the
    6909 		 * EITR(WM_MSIX_LINKINTR_IDX) value like
   6910 		 * FreeBSD's if_igb.
   6911 		 */
   6912 	}
   6913 
   6914 	/* Set the VLAN EtherType. */
   6915 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6916 
   6917 	/*
   6918 	 * Set up the transmit control register; we start out with
   6919 	 * a collision distance suitable for FDX, but update it when
   6920 	 * we resolve the media type.
   6921 	 */
   6922 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6923 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6924 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6925 	if (sc->sc_type >= WM_T_82571)
   6926 		sc->sc_tctl |= TCTL_MULR;
   6927 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6928 
   6929 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6930 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6931 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6932 	}
   6933 
   6934 	if (sc->sc_type == WM_T_80003) {
   6935 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6936 		reg &= ~TCTL_EXT_GCEX_MASK;
   6937 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6938 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6939 	}
   6940 
   6941 	/* Set the media. */
   6942 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6943 		goto out;
   6944 
   6945 	/* Configure for OS presence */
   6946 	wm_init_manageability(sc);
   6947 
   6948 	/*
   6949 	 * Set up the receive control register; we actually program the
   6950 	 * register when we set the receive filter. Use multicast address
   6951 	 * offset type 0.
   6952 	 *
   6953 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6954 	 * don't enable that feature.
   6955 	 */
   6956 	sc->sc_mchash_type = 0;
   6957 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6958 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6959 
    6960 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6961 	if (sc->sc_type == WM_T_82574)
   6962 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6963 
   6964 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6965 		sc->sc_rctl |= RCTL_SECRC;
   6966 
   6967 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6968 	    && (ifp->if_mtu > ETHERMTU)) {
   6969 		sc->sc_rctl |= RCTL_LPE;
   6970 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6971 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6972 	}
   6973 
   6974 	if (MCLBYTES == 2048)
   6975 		sc->sc_rctl |= RCTL_2k;
   6976 	else {
   6977 		if (sc->sc_type >= WM_T_82543) {
   6978 			switch (MCLBYTES) {
   6979 			case 4096:
   6980 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6981 				break;
   6982 			case 8192:
   6983 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6984 				break;
   6985 			case 16384:
   6986 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6987 				break;
   6988 			default:
   6989 				panic("wm_init: MCLBYTES %d unsupported",
   6990 				    MCLBYTES);
   6991 				break;
   6992 			}
   6993 		} else
   6994 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6995 	}
   6996 
   6997 	/* Enable ECC */
   6998 	switch (sc->sc_type) {
   6999 	case WM_T_82571:
   7000 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7001 		reg |= PBA_ECC_CORR_EN;
   7002 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7003 		break;
   7004 	case WM_T_PCH_LPT:
   7005 	case WM_T_PCH_SPT:
   7006 	case WM_T_PCH_CNP:
   7007 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7008 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7009 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7010 
   7011 		sc->sc_ctrl |= CTRL_MEHE;
   7012 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7013 		break;
   7014 	default:
   7015 		break;
   7016 	}
   7017 
   7018 	/*
   7019 	 * Set the receive filter.
   7020 	 *
   7021 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7022 	 * the setting of RCTL.EN in wm_set_filter()
   7023 	 */
   7024 	wm_set_filter(sc);
   7025 
    7026 	/* On 82575 and later, set RDT only if RX is enabled */
   7027 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7028 		int qidx;
   7029 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7030 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7031 			for (i = 0; i < WM_NRXDESC; i++) {
   7032 				mutex_enter(rxq->rxq_lock);
   7033 				wm_init_rxdesc(rxq, i);
   7034 				mutex_exit(rxq->rxq_lock);
   7035 
   7036 			}
   7037 		}
   7038 	}
   7039 
   7040 	wm_unset_stopping_flags(sc);
   7041 
   7042 	/* Start the one second link check clock. */
   7043 	callout_schedule(&sc->sc_tick_ch, hz);
   7044 
   7045 	/* ...all done! */
   7046 	ifp->if_flags |= IFF_RUNNING;
   7047 
   7048  out:
   7049 	/* Save last flags for the callback */
   7050 	sc->sc_if_flags = ifp->if_flags;
   7051 	sc->sc_ec_capenable = ec->ec_capenable;
   7052 	if (error)
   7053 		log(LOG_ERR, "%s: interface not running\n",
   7054 		    device_xname(sc->sc_dev));
   7055 	return error;
   7056 }
   7057 
   7058 /*
   7059  * wm_stop:		[ifnet interface function]
   7060  *
   7061  *	Stop transmission on the interface.
   7062  */
   7063 static void
   7064 wm_stop(struct ifnet *ifp, int disable)
   7065 {
   7066 	struct wm_softc *sc = ifp->if_softc;
   7067 
   7068 	ASSERT_SLEEPABLE();
   7069 
   7070 	WM_CORE_LOCK(sc);
   7071 	wm_stop_locked(ifp, disable ? true : false, true);
   7072 	WM_CORE_UNLOCK(sc);
   7073 
   7074 	/*
   7075 	 * After wm_set_stopping_flags(), it is guaranteed
   7076 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7077 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7078 	 * because it can sleep, so it is
    7079 	 * called here instead.
   7080 	 */
   7081 	for (int i = 0; i < sc->sc_nqueues; i++)
   7082 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7083 }
   7084 
   7085 static void
   7086 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7087 {
   7088 	struct wm_softc *sc = ifp->if_softc;
   7089 	struct wm_txsoft *txs;
   7090 	int i, qidx;
   7091 
   7092 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7093 		device_xname(sc->sc_dev), __func__));
   7094 	KASSERT(WM_CORE_LOCKED(sc));
   7095 
   7096 	wm_set_stopping_flags(sc);
   7097 
   7098 	if (sc->sc_flags & WM_F_HAS_MII) {
   7099 		/* Down the MII. */
   7100 		mii_down(&sc->sc_mii);
   7101 	} else {
   7102 #if 0
   7103 		/* Should we clear PHY's status properly? */
   7104 		wm_reset(sc);
   7105 #endif
   7106 	}
   7107 
   7108 	/* Stop the transmit and receive processes. */
   7109 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7110 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7111 	sc->sc_rctl &= ~RCTL_EN;
   7112 
   7113 	/*
   7114 	 * Clear the interrupt mask to ensure the device cannot assert its
   7115 	 * interrupt line.
   7116 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7117 	 * service any currently pending or shared interrupt.
   7118 	 */
   7119 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7120 	sc->sc_icr = 0;
   7121 	if (wm_is_using_msix(sc)) {
   7122 		if (sc->sc_type != WM_T_82574) {
   7123 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7124 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7125 		} else
   7126 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7127 	}
   7128 
   7129 	/*
   7130 	 * Stop callouts after interrupts are disabled; if we have
   7131 	 * to wait for them, we will be releasing the CORE_LOCK
   7132 	 * briefly, which will unblock interrupts on the current CPU.
   7133 	 */
   7134 
   7135 	/* Stop the one second clock. */
   7136 	if (wait)
   7137 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7138 	else
   7139 		callout_stop(&sc->sc_tick_ch);
   7140 
   7141 	/* Stop the 82547 Tx FIFO stall check timer. */
   7142 	if (sc->sc_type == WM_T_82547) {
   7143 		if (wait)
   7144 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7145 		else
   7146 			callout_stop(&sc->sc_txfifo_ch);
   7147 	}
   7148 
   7149 	/* Release any queued transmit buffers. */
   7150 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7151 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7152 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7153 		struct mbuf *m;
   7154 
   7155 		mutex_enter(txq->txq_lock);
   7156 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7157 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7158 			txs = &txq->txq_soft[i];
   7159 			if (txs->txs_mbuf != NULL) {
   7160 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7161 				m_freem(txs->txs_mbuf);
   7162 				txs->txs_mbuf = NULL;
   7163 			}
   7164 		}
   7165 		/* Drain txq_interq */
   7166 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7167 			m_freem(m);
   7168 		mutex_exit(txq->txq_lock);
   7169 	}
   7170 
   7171 	/* Mark the interface as down and cancel the watchdog timer. */
   7172 	ifp->if_flags &= ~IFF_RUNNING;
   7173 
   7174 	if (disable) {
   7175 		for (i = 0; i < sc->sc_nqueues; i++) {
   7176 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7177 			mutex_enter(rxq->rxq_lock);
   7178 			wm_rxdrain(rxq);
   7179 			mutex_exit(rxq->rxq_lock);
   7180 		}
   7181 	}
   7182 
   7183 #if 0 /* notyet */
   7184 	if (sc->sc_type >= WM_T_82544)
   7185 		CSR_WRITE(sc, WMREG_WUC, 0);
   7186 #endif
   7187 }
   7188 
   7189 static void
   7190 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7191 {
   7192 	struct mbuf *m;
   7193 	int i;
   7194 
   7195 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7196 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7197 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7198 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7199 		    m->m_data, m->m_len, m->m_flags);
   7200 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7201 	    i, i == 1 ? "" : "s");
   7202 }
   7203 
   7204 /*
   7205  * wm_82547_txfifo_stall:
   7206  *
   7207  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7208  *	reset the FIFO pointers, and restart packet transmission.
   7209  */
   7210 static void
   7211 wm_82547_txfifo_stall(void *arg)
   7212 {
   7213 	struct wm_softc *sc = arg;
   7214 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7215 
   7216 	mutex_enter(txq->txq_lock);
   7217 
   7218 	if (txq->txq_stopping)
   7219 		goto out;
   7220 
   7221 	if (txq->txq_fifo_stall) {
   7222 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7223 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7224 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7225 			/*
   7226 			 * Packets have drained.  Stop transmitter, reset
   7227 			 * FIFO pointers, restart transmitter, and kick
   7228 			 * the packet queue.
   7229 			 */
   7230 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7231 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7232 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7233 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7234 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7235 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7236 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7237 			CSR_WRITE_FLUSH(sc);
   7238 
   7239 			txq->txq_fifo_head = 0;
   7240 			txq->txq_fifo_stall = 0;
   7241 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7242 		} else {
   7243 			/*
   7244 			 * Still waiting for packets to drain; try again in
   7245 			 * another tick.
   7246 			 */
   7247 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7248 		}
   7249 	}
   7250 
   7251 out:
   7252 	mutex_exit(txq->txq_lock);
   7253 }
   7254 
   7255 /*
   7256  * wm_82547_txfifo_bugchk:
   7257  *
   7258  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7259  *	prevent enqueueing a packet that would wrap around the end
    7260 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7261  *
   7262  *	We do this by checking the amount of space before the end
   7263  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7264  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7265  *	the internal FIFO pointers to the beginning, and restart
   7266  *	transmission on the interface.
   7267  */
   7268 #define	WM_FIFO_HDR		0x10
   7269 #define	WM_82547_PAD_LEN	0x3e0
   7270 static int
   7271 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7272 {
   7273 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7274 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7275 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7276 
   7277 	/* Just return if already stalled. */
   7278 	if (txq->txq_fifo_stall)
   7279 		return 1;
   7280 
   7281 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7282 		/* Stall only occurs in half-duplex mode. */
   7283 		goto send_packet;
   7284 	}
   7285 
   7286 	if (len >= WM_82547_PAD_LEN + space) {
   7287 		txq->txq_fifo_stall = 1;
   7288 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7289 		return 1;
   7290 	}
   7291 
   7292  send_packet:
   7293 	txq->txq_fifo_head += len;
   7294 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7295 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7296 
   7297 	return 0;
   7298 }
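
/*
 * Worked example (hypothetical numbers): a 1514-byte frame consumes
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes of FIFO
 * space.  In half-duplex mode it may be enqueued only while
 * len < WM_82547_PAD_LEN + space, i.e. while more than
 * 0x600 - 0x3e0 = 0x220 bytes remain before the end of the FIFO;
 * otherwise the queue stalls until the FIFO drains.
 */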
   7299 
   7300 static int
   7301 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7302 {
   7303 	int error;
   7304 
   7305 	/*
   7306 	 * Allocate the control data structures, and create and load the
   7307 	 * DMA map for it.
   7308 	 *
   7309 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7310 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7311 	 * both sets within the same 4G segment.
   7312 	 */
   7313 	if (sc->sc_type < WM_T_82544)
   7314 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7315 	else
   7316 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7317 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7318 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7319 	else
   7320 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7321 
   7322 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7323 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7324 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7325 		aprint_error_dev(sc->sc_dev,
   7326 		    "unable to allocate TX control data, error = %d\n",
   7327 		    error);
   7328 		goto fail_0;
   7329 	}
   7330 
   7331 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7332 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7333 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7334 		aprint_error_dev(sc->sc_dev,
   7335 		    "unable to map TX control data, error = %d\n", error);
   7336 		goto fail_1;
   7337 	}
   7338 
   7339 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7340 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7341 		aprint_error_dev(sc->sc_dev,
   7342 		    "unable to create TX control data DMA map, error = %d\n",
   7343 		    error);
   7344 		goto fail_2;
   7345 	}
   7346 
   7347 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7348 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7349 		aprint_error_dev(sc->sc_dev,
   7350 		    "unable to load TX control data DMA map, error = %d\n",
   7351 		    error);
   7352 		goto fail_3;
   7353 	}
   7354 
   7355 	return 0;
   7356 
   7357  fail_3:
   7358 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7359  fail_2:
   7360 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7361 	    WM_TXDESCS_SIZE(txq));
   7362  fail_1:
   7363 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7364  fail_0:
   7365 	return error;
   7366 }
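
/*
 * Note on the sequence above: bus_dmamem_alloc()/bus_dmamem_map()/
 * bus_dmamap_create()/bus_dmamap_load() is the standard bus_dma(9)
 * allocation pattern.  The 0x100000000ULL boundary argument keeps the
 * descriptor ring within a single 4G segment, and the fail_* labels
 * unwind the completed steps in reverse order.
 */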
   7367 
   7368 static void
   7369 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7370 {
   7371 
   7372 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7373 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7374 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7375 	    WM_TXDESCS_SIZE(txq));
   7376 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7377 }
   7378 
   7379 static int
   7380 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7381 {
   7382 	int error;
   7383 	size_t rxq_descs_size;
   7384 
   7385 	/*
   7386 	 * Allocate the control data structures, and create and load the
   7387 	 * DMA map for it.
   7388 	 *
   7389 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7390 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7391 	 * both sets within the same 4G segment.
   7392 	 */
   7393 	rxq->rxq_ndesc = WM_NRXDESC;
   7394 	if (sc->sc_type == WM_T_82574)
   7395 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7396 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7397 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7398 	else
   7399 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7400 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7401 
   7402 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7403 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7404 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7405 		aprint_error_dev(sc->sc_dev,
   7406 		    "unable to allocate RX control data, error = %d\n",
   7407 		    error);
   7408 		goto fail_0;
   7409 	}
   7410 
   7411 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7412 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7413 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7414 		aprint_error_dev(sc->sc_dev,
   7415 		    "unable to map RX control data, error = %d\n", error);
   7416 		goto fail_1;
   7417 	}
   7418 
   7419 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7420 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7421 		aprint_error_dev(sc->sc_dev,
   7422 		    "unable to create RX control data DMA map, error = %d\n",
   7423 		    error);
   7424 		goto fail_2;
   7425 	}
   7426 
   7427 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7428 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7429 		aprint_error_dev(sc->sc_dev,
   7430 		    "unable to load RX control data DMA map, error = %d\n",
   7431 		    error);
   7432 		goto fail_3;
   7433 	}
   7434 
   7435 	return 0;
   7436 
   7437  fail_3:
   7438 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7439  fail_2:
   7440 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7441 	    rxq_descs_size);
   7442  fail_1:
   7443 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7444  fail_0:
   7445 	return error;
   7446 }
   7447 
   7448 static void
   7449 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7450 {
   7451 
   7452 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7453 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7454 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7455 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7456 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7457 }
   7458 
   7459 
   7460 static int
   7461 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7462 {
   7463 	int i, error;
   7464 
   7465 	/* Create the transmit buffer DMA maps. */
   7466 	WM_TXQUEUELEN(txq) =
   7467 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7468 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7469 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7470 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7471 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7472 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7473 			aprint_error_dev(sc->sc_dev,
   7474 			    "unable to create Tx DMA map %d, error = %d\n",
   7475 			    i, error);
   7476 			goto fail;
   7477 		}
   7478 	}
   7479 
   7480 	return 0;
   7481 
   7482  fail:
   7483 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7484 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7485 			bus_dmamap_destroy(sc->sc_dmat,
   7486 			    txq->txq_soft[i].txs_dmamap);
   7487 	}
   7488 	return error;
   7489 }
   7490 
   7491 static void
   7492 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7493 {
   7494 	int i;
   7495 
   7496 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7497 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7498 			bus_dmamap_destroy(sc->sc_dmat,
   7499 			    txq->txq_soft[i].txs_dmamap);
   7500 	}
   7501 }
   7502 
   7503 static int
   7504 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7505 {
   7506 	int i, error;
   7507 
   7508 	/* Create the receive buffer DMA maps. */
   7509 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7510 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7511 			    MCLBYTES, 0, 0,
   7512 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7513 			aprint_error_dev(sc->sc_dev,
   7514 			    "unable to create Rx DMA map %d error = %d\n",
   7515 			    i, error);
   7516 			goto fail;
   7517 		}
   7518 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7519 	}
   7520 
   7521 	return 0;
   7522 
   7523  fail:
   7524 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7525 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7526 			bus_dmamap_destroy(sc->sc_dmat,
   7527 			    rxq->rxq_soft[i].rxs_dmamap);
   7528 	}
   7529 	return error;
   7530 }
   7531 
   7532 static void
   7533 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7534 {
   7535 	int i;
   7536 
   7537 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7538 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7539 			bus_dmamap_destroy(sc->sc_dmat,
   7540 			    rxq->rxq_soft[i].rxs_dmamap);
   7541 	}
   7542 }
   7543 
   7544 /*
    7545  * wm_alloc_txrx_queues:
    7546  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7547  */
   7548 static int
   7549 wm_alloc_txrx_queues(struct wm_softc *sc)
   7550 {
   7551 	int i, error, tx_done, rx_done;
   7552 
   7553 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7554 	    KM_SLEEP);
   7555 	if (sc->sc_queue == NULL) {
   7556 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   7557 		error = ENOMEM;
   7558 		goto fail_0;
   7559 	}
   7560 
   7561 	/* For transmission */
   7562 	error = 0;
   7563 	tx_done = 0;
   7564 	for (i = 0; i < sc->sc_nqueues; i++) {
   7565 #ifdef WM_EVENT_COUNTERS
   7566 		int j;
   7567 		const char *xname;
   7568 #endif
   7569 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7570 		txq->txq_sc = sc;
   7571 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7572 
   7573 		error = wm_alloc_tx_descs(sc, txq);
   7574 		if (error)
   7575 			break;
   7576 		error = wm_alloc_tx_buffer(sc, txq);
   7577 		if (error) {
   7578 			wm_free_tx_descs(sc, txq);
   7579 			break;
   7580 		}
   7581 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7582 		if (txq->txq_interq == NULL) {
   7583 			wm_free_tx_descs(sc, txq);
   7584 			wm_free_tx_buffer(sc, txq);
   7585 			error = ENOMEM;
   7586 			break;
   7587 		}
   7588 
   7589 #ifdef WM_EVENT_COUNTERS
   7590 		xname = device_xname(sc->sc_dev);
   7591 
   7592 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7593 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7594 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7595 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7596 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7597 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7598 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7599 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7600 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7601 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7602 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7603 
   7604 		for (j = 0; j < WM_NTXSEGS; j++) {
   7605 			snprintf(txq->txq_txseg_evcnt_names[j],
   7606 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7607 			    "txq%02dtxseg%d", i, j);
   7608 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7609 			    EVCNT_TYPE_MISC,
   7610 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7611 		}
   7612 
   7613 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7614 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7615 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7616 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7617 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7618 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7619 #endif /* WM_EVENT_COUNTERS */
   7620 
   7621 		tx_done++;
   7622 	}
   7623 	if (error)
   7624 		goto fail_1;
   7625 
   7626 	/* For receive */
   7627 	error = 0;
   7628 	rx_done = 0;
   7629 	for (i = 0; i < sc->sc_nqueues; i++) {
   7630 #ifdef WM_EVENT_COUNTERS
   7631 		const char *xname;
   7632 #endif
   7633 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7634 		rxq->rxq_sc = sc;
   7635 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7636 
   7637 		error = wm_alloc_rx_descs(sc, rxq);
   7638 		if (error)
   7639 			break;
   7640 
   7641 		error = wm_alloc_rx_buffer(sc, rxq);
   7642 		if (error) {
   7643 			wm_free_rx_descs(sc, rxq);
   7644 			break;
   7645 		}
   7646 
   7647 #ifdef WM_EVENT_COUNTERS
   7648 		xname = device_xname(sc->sc_dev);
   7649 
   7650 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7651 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7652 
   7653 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7654 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7655 #endif /* WM_EVENT_COUNTERS */
   7656 
   7657 		rx_done++;
   7658 	}
   7659 	if (error)
   7660 		goto fail_2;
   7661 
   7662 	return 0;
   7663 
   7664  fail_2:
   7665 	for (i = 0; i < rx_done; i++) {
   7666 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7667 		wm_free_rx_buffer(sc, rxq);
   7668 		wm_free_rx_descs(sc, rxq);
   7669 		if (rxq->rxq_lock)
   7670 			mutex_obj_free(rxq->rxq_lock);
   7671 	}
   7672  fail_1:
   7673 	for (i = 0; i < tx_done; i++) {
   7674 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7675 		pcq_destroy(txq->txq_interq);
   7676 		wm_free_tx_buffer(sc, txq);
   7677 		wm_free_tx_descs(sc, txq);
   7678 		if (txq->txq_lock)
   7679 			mutex_obj_free(txq->txq_lock);
   7680 	}
   7681 
   7682 	kmem_free(sc->sc_queue,
   7683 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7684  fail_0:
   7685 	return error;
   7686 }
   7687 
   7688 /*
    7689  * wm_free_txrx_queues:
    7690  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7691  */
   7692 static void
   7693 wm_free_txrx_queues(struct wm_softc *sc)
   7694 {
   7695 	int i;
   7696 
   7697 	for (i = 0; i < sc->sc_nqueues; i++) {
   7698 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7699 
   7700 #ifdef WM_EVENT_COUNTERS
   7701 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7702 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7703 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7704 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7705 #endif /* WM_EVENT_COUNTERS */
   7706 
   7707 		wm_free_rx_buffer(sc, rxq);
   7708 		wm_free_rx_descs(sc, rxq);
   7709 		if (rxq->rxq_lock)
   7710 			mutex_obj_free(rxq->rxq_lock);
   7711 	}
   7712 
   7713 	for (i = 0; i < sc->sc_nqueues; i++) {
   7714 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7715 		struct mbuf *m;
   7716 #ifdef WM_EVENT_COUNTERS
   7717 		int j;
   7718 
   7719 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7720 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7721 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7722 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7723 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7724 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7725 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7726 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7727 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7728 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7729 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7730 
   7731 		for (j = 0; j < WM_NTXSEGS; j++)
   7732 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7733 
   7734 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7735 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7736 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7737 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7738 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7739 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7740 #endif /* WM_EVENT_COUNTERS */
   7741 
   7742 		/* Drain txq_interq */
   7743 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7744 			m_freem(m);
   7745 		pcq_destroy(txq->txq_interq);
   7746 
   7747 		wm_free_tx_buffer(sc, txq);
   7748 		wm_free_tx_descs(sc, txq);
   7749 		if (txq->txq_lock)
   7750 			mutex_obj_free(txq->txq_lock);
   7751 	}
   7752 
   7753 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7754 }
   7755 
   7756 static void
   7757 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7758 {
   7759 
   7760 	KASSERT(mutex_owned(txq->txq_lock));
   7761 
   7762 	/* Initialize the transmit descriptor ring. */
   7763 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7764 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7765 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7766 	txq->txq_free = WM_NTXDESC(txq);
   7767 	txq->txq_next = 0;
   7768 }
   7769 
   7770 static void
   7771 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7772     struct wm_txqueue *txq)
   7773 {
   7774 
   7775 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7776 		device_xname(sc->sc_dev), __func__));
   7777 	KASSERT(mutex_owned(txq->txq_lock));
   7778 
   7779 	if (sc->sc_type < WM_T_82543) {
   7780 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7781 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7782 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7783 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7784 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7785 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7786 	} else {
   7787 		int qid = wmq->wmq_id;
   7788 
   7789 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7790 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7791 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7792 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7793 
   7794 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7795 			/*
   7796 			 * Don't write TDT before TCTL.EN is set.
    7797 			 * See the datasheet.
   7798 			 */
   7799 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7800 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7801 			    | TXDCTL_WTHRESH(0));
   7802 		else {
   7803 			/* XXX should update with AIM? */
   7804 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7805 			if (sc->sc_type >= WM_T_82540) {
    7806 				/* Should be the same value as TIDV */
   7807 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7808 			}
   7809 
   7810 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7811 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7812 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7813 		}
   7814 	}
   7815 }
   7816 
   7817 static void
   7818 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7819 {
   7820 	int i;
   7821 
   7822 	KASSERT(mutex_owned(txq->txq_lock));
   7823 
   7824 	/* Initialize the transmit job descriptors. */
   7825 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7826 		txq->txq_soft[i].txs_mbuf = NULL;
   7827 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7828 	txq->txq_snext = 0;
   7829 	txq->txq_sdirty = 0;
   7830 }
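/*
 * After wm_init_tx_buffer(), all WM_TXQUEUELEN(txq) software jobs are free:
 * txq_snext is the next job slot to hand out and txq_sdirty the oldest
 * outstanding job to reclaim, with txq_sfree counting the free slots in
 * between (compare wm_send_common_locked(), which takes the job at
 * txq_snext and decrements txq_sfree for each packet it commits).
 */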
   7831 
   7832 static void
   7833 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7834     struct wm_txqueue *txq)
   7835 {
   7836 
   7837 	KASSERT(mutex_owned(txq->txq_lock));
   7838 
   7839 	/*
   7840 	 * Set up some register offsets that are different between
   7841 	 * the i82542 and the i82543 and later chips.
   7842 	 */
   7843 	if (sc->sc_type < WM_T_82543)
   7844 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7845 	else
   7846 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7847 
   7848 	wm_init_tx_descs(sc, txq);
   7849 	wm_init_tx_regs(sc, wmq, txq);
   7850 	wm_init_tx_buffer(sc, txq);
   7851 
   7852 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   7853 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7854 
   7855 	txq->txq_sending = false;
   7856 }
   7857 
   7858 static void
   7859 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7860     struct wm_rxqueue *rxq)
   7861 {
   7862 
   7863 	KASSERT(mutex_owned(rxq->rxq_lock));
   7864 
   7865 	/*
   7866 	 * Initialize the receive descriptor and receive job
   7867 	 * descriptor rings.
   7868 	 */
   7869 	if (sc->sc_type < WM_T_82543) {
   7870 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7871 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7872 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7873 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7874 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7875 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7876 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7877 
   7878 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7879 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7880 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7881 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7882 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7883 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7884 	} else {
   7885 		int qid = wmq->wmq_id;
   7886 
   7887 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7888 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7889 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7890 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7891 
   7892 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7893 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7894 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7895 				    "or higher\n", __func__, MCLBYTES);
   7896 
   7897 			/*
    7898 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
    7899 			 * supported.
   7900 			 */
   7901 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7902 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7903 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
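			/*
			 * For example, with the usual MCLBYTES of 2048 and
			 * SRRCTL_BSIZEPKT_SHIFT of 10 (assumed values), the
			 * BSIZEPKT field written above is 2, i.e. a 2 KB
			 * receive buffer expressed in 1 KB units; the panic
			 * check ensures MCLBYTES is a multiple of that unit.
			 */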
   7904 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7905 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7906 			    | RXDCTL_WTHRESH(1));
   7907 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7908 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7909 		} else {
   7910 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7911 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7912 			/* XXX should update with AIM? */
   7913 			CSR_WRITE(sc, WMREG_RDTR,
   7914 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7915 			/* MUST be the same */
   7916 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7917 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7918 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7919 		}
   7920 	}
   7921 }
   7922 
   7923 static int
   7924 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7925 {
   7926 	struct wm_rxsoft *rxs;
   7927 	int error, i;
   7928 
   7929 	KASSERT(mutex_owned(rxq->rxq_lock));
   7930 
   7931 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7932 		rxs = &rxq->rxq_soft[i];
   7933 		if (rxs->rxs_mbuf == NULL) {
   7934 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7935 				log(LOG_ERR, "%s: unable to allocate or map "
   7936 				    "rx buffer %d, error = %d\n",
   7937 				    device_xname(sc->sc_dev), i, error);
   7938 				/*
   7939 				 * XXX Should attempt to run with fewer receive
   7940 				 * XXX buffers instead of just failing.
   7941 				 */
   7942 				wm_rxdrain(rxq);
   7943 				return ENOMEM;
   7944 			}
   7945 		} else {
   7946 			/*
   7947 			 * For 82575 and 82576, the RX descriptors must be
   7948 			 * initialized after the setting of RCTL.EN in
   7949 			 * wm_set_filter()
   7950 			 */
   7951 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7952 				wm_init_rxdesc(rxq, i);
   7953 		}
   7954 	}
   7955 	rxq->rxq_ptr = 0;
   7956 	rxq->rxq_discard = 0;
   7957 	WM_RXCHAIN_RESET(rxq);
   7958 
   7959 	return 0;
   7960 }
   7961 
   7962 static int
   7963 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7964     struct wm_rxqueue *rxq)
   7965 {
   7966 
   7967 	KASSERT(mutex_owned(rxq->rxq_lock));
   7968 
   7969 	/*
   7970 	 * Set up some register offsets that are different between
   7971 	 * the i82542 and the i82543 and later chips.
   7972 	 */
   7973 	if (sc->sc_type < WM_T_82543)
   7974 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7975 	else
   7976 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7977 
   7978 	wm_init_rx_regs(sc, wmq, rxq);
   7979 	return wm_init_rx_buffer(sc, rxq);
   7980 }
   7981 
   7982 /*
    7983  * wm_init_txrx_queues:
    7984  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7985  */
   7986 static int
   7987 wm_init_txrx_queues(struct wm_softc *sc)
   7988 {
   7989 	int i, error = 0;
   7990 
   7991 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7992 		device_xname(sc->sc_dev), __func__));
   7993 
   7994 	for (i = 0; i < sc->sc_nqueues; i++) {
   7995 		struct wm_queue *wmq = &sc->sc_queue[i];
   7996 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7997 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7998 
   7999 		/*
   8000 		 * TODO
    8001 		 * Currently, a constant value is used instead of AIM.
    8002 		 * Furthermore, the interrupt interval of a multiqueue setup
    8003 		 * that uses polling mode is less than the default value.
   8004 		 * More tuning and AIM are required.
   8005 		 */
   8006 		if (wm_is_using_multiqueue(sc))
   8007 			wmq->wmq_itr = 50;
   8008 		else
   8009 			wmq->wmq_itr = sc->sc_itr_init;
   8010 		wmq->wmq_set_itr = true;
   8011 
   8012 		mutex_enter(txq->txq_lock);
   8013 		wm_init_tx_queue(sc, wmq, txq);
   8014 		mutex_exit(txq->txq_lock);
   8015 
   8016 		mutex_enter(rxq->rxq_lock);
   8017 		error = wm_init_rx_queue(sc, wmq, rxq);
   8018 		mutex_exit(rxq->rxq_lock);
   8019 		if (error)
   8020 			break;
   8021 	}
   8022 
   8023 	return error;
   8024 }
   8025 
   8026 /*
   8027  * wm_tx_offload:
   8028  *
   8029  *	Set up TCP/IP checksumming parameters for the
   8030  *	specified packet.
   8031  */
   8032 static void
   8033 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8034     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8035 {
   8036 	struct mbuf *m0 = txs->txs_mbuf;
   8037 	struct livengood_tcpip_ctxdesc *t;
   8038 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8039 	uint32_t ipcse;
   8040 	struct ether_header *eh;
   8041 	int offset, iphl;
   8042 	uint8_t fields;
   8043 
   8044 	/*
   8045 	 * XXX It would be nice if the mbuf pkthdr had offset
   8046 	 * fields for the protocol headers.
   8047 	 */
   8048 
   8049 	eh = mtod(m0, struct ether_header *);
    8050 	switch (ntohs(eh->ether_type)) {
   8051 	case ETHERTYPE_IP:
   8052 	case ETHERTYPE_IPV6:
   8053 		offset = ETHER_HDR_LEN;
   8054 		break;
   8055 
   8056 	case ETHERTYPE_VLAN:
   8057 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8058 		break;
   8059 
   8060 	default:
   8061 		/* Don't support this protocol or encapsulation. */
   8062 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8063 		txq->txq_last_hw_ipcs = 0;
   8064 		txq->txq_last_hw_tucs = 0;
   8065 		*fieldsp = 0;
   8066 		*cmdp = 0;
   8067 		return;
   8068 	}
   8069 
   8070 	if ((m0->m_pkthdr.csum_flags &
   8071 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8072 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8073 	} else
   8074 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8075 
   8076 	ipcse = offset + iphl - 1;
   8077 
   8078 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8079 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8080 	seg = 0;
   8081 	fields = 0;
   8082 
   8083 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8084 		int hlen = offset + iphl;
   8085 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8086 
   8087 		if (__predict_false(m0->m_len <
   8088 				    (hlen + sizeof(struct tcphdr)))) {
   8089 			/*
   8090 			 * TCP/IP headers are not in the first mbuf; we need
   8091 			 * to do this the slow and painful way. Let's just
   8092 			 * hope this doesn't happen very often.
   8093 			 */
   8094 			struct tcphdr th;
   8095 
   8096 			WM_Q_EVCNT_INCR(txq, tsopain);
   8097 
   8098 			m_copydata(m0, hlen, sizeof(th), &th);
   8099 			if (v4) {
   8100 				struct ip ip;
   8101 
   8102 				m_copydata(m0, offset, sizeof(ip), &ip);
   8103 				ip.ip_len = 0;
   8104 				m_copyback(m0,
   8105 				    offset + offsetof(struct ip, ip_len),
   8106 				    sizeof(ip.ip_len), &ip.ip_len);
   8107 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8108 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8109 			} else {
   8110 				struct ip6_hdr ip6;
   8111 
   8112 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8113 				ip6.ip6_plen = 0;
   8114 				m_copyback(m0,
   8115 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8116 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8117 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8118 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8119 			}
   8120 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8121 			    sizeof(th.th_sum), &th.th_sum);
   8122 
   8123 			hlen += th.th_off << 2;
   8124 		} else {
   8125 			/*
   8126 			 * TCP/IP headers are in the first mbuf; we can do
   8127 			 * this the easy way.
   8128 			 */
   8129 			struct tcphdr *th;
   8130 
   8131 			if (v4) {
   8132 				struct ip *ip =
   8133 				    (void *)(mtod(m0, char *) + offset);
   8134 				th = (void *)(mtod(m0, char *) + hlen);
   8135 
   8136 				ip->ip_len = 0;
   8137 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8138 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8139 			} else {
   8140 				struct ip6_hdr *ip6 =
   8141 				    (void *)(mtod(m0, char *) + offset);
   8142 				th = (void *)(mtod(m0, char *) + hlen);
   8143 
   8144 				ip6->ip6_plen = 0;
   8145 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8146 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8147 			}
   8148 			hlen += th->th_off << 2;
   8149 		}
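		/*
		 * In both paths above, th_sum is seeded with the checksum
		 * of the TCP pseudo-header computed with a zero length
		 * (ip_len/ip6_plen are cleared first).  With TSE set, the
		 * hardware rewrites the length fields for each segment and
		 * folds them into the checksum, so the driver must not
		 * include the original packet length here.
		 */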
   8150 
   8151 		if (v4) {
   8152 			WM_Q_EVCNT_INCR(txq, tso);
   8153 			cmdlen |= WTX_TCPIP_CMD_IP;
   8154 		} else {
   8155 			WM_Q_EVCNT_INCR(txq, tso6);
   8156 			ipcse = 0;
   8157 		}
   8158 		cmd |= WTX_TCPIP_CMD_TSE;
   8159 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8160 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8161 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8162 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8163 	}
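	/*
	 * Worked example (assumed header sizes): an IPv4 TSO frame with a
	 * 14-byte Ethernet header, a 20-byte IP header and a 20-byte TCP
	 * header gives hlen = 54, so cmdlen carries the TCP payload length
	 * (m0->m_pkthdr.len - 54) and seg packs HDRLEN(54) together with
	 * the MSS taken from m0->m_pkthdr.segsz.
	 */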
   8164 
   8165 	/*
   8166 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8167 	 * offload feature, if we load the context descriptor, we
   8168 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8169 	 */
   8170 
   8171 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8172 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8173 	    WTX_TCPIP_IPCSE(ipcse);
   8174 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8175 		WM_Q_EVCNT_INCR(txq, ipsum);
   8176 		fields |= WTX_IXSM;
   8177 	}
   8178 
   8179 	offset += iphl;
   8180 
   8181 	if (m0->m_pkthdr.csum_flags &
   8182 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8183 		WM_Q_EVCNT_INCR(txq, tusum);
   8184 		fields |= WTX_TXSM;
   8185 		tucs = WTX_TCPIP_TUCSS(offset) |
   8186 		    WTX_TCPIP_TUCSO(offset +
   8187 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8188 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8189 	} else if ((m0->m_pkthdr.csum_flags &
   8190 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8191 		WM_Q_EVCNT_INCR(txq, tusum6);
   8192 		fields |= WTX_TXSM;
   8193 		tucs = WTX_TCPIP_TUCSS(offset) |
   8194 		    WTX_TCPIP_TUCSO(offset +
   8195 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8196 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8197 	} else {
   8198 		/* Just initialize it to a valid TCP context. */
   8199 		tucs = WTX_TCPIP_TUCSS(offset) |
   8200 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8201 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8202 	}
   8203 
   8204 	*cmdp = cmd;
   8205 	*fieldsp = fields;
   8206 
   8207 	/*
   8208 	 * We don't have to write context descriptor for every packet
   8209 	 * except for 82574. For 82574, we must write context descriptor
   8210 	 * for every packet when we use two descriptor queues.
   8211 	 *
   8212 	 * The 82574L can only remember the *last* context used
    8213 	 * regardless of the queue that it was used for.  We cannot reuse
   8214 	 * contexts on this hardware platform and must generate a new
   8215 	 * context every time.  82574L hardware spec, section 7.2.6,
   8216 	 * second note.
   8217 	 */
   8218 	if (sc->sc_nqueues < 2) {
   8219 		/*
    8220 		 * Setting up a new checksum offload context for every
    8221 		 * frame takes a lot of processing time for the hardware.
    8222 		 * This also reduces performance a lot for small frames,
    8223 		 * so avoid it if the driver can reuse a previously
    8224 		 * configured checksum offload context.
    8225 		 * For TSO, in theory we could reuse the same TSO context
    8226 		 * only if the frame has the same type (IP/TCP) and the
    8227 		 * same MSS. However, checking whether a frame has the
    8228 		 * same IP/TCP structure is hard, so just ignore that and
    8229 		 * always establish a new TSO context.
   8230 		 */
   8231 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8232 		    == 0) {
   8233 			if (txq->txq_last_hw_cmd == cmd &&
   8234 			    txq->txq_last_hw_fields == fields &&
   8235 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8236 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8237 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8238 				return;
   8239 			}
   8240 		}
   8241 
   8242 		txq->txq_last_hw_cmd = cmd;
   8243 		txq->txq_last_hw_fields = fields;
   8244 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8245 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8246 	}
   8247 
   8248 	/* Fill in the context descriptor. */
   8249 	t = (struct livengood_tcpip_ctxdesc *)
   8250 	    &txq->txq_descs[txq->txq_next];
   8251 	t->tcpip_ipcs = htole32(ipcs);
   8252 	t->tcpip_tucs = htole32(tucs);
   8253 	t->tcpip_cmdlen = htole32(cmdlen);
   8254 	t->tcpip_seg = htole32(seg);
   8255 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8256 
   8257 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8258 	txs->txs_ndesc++;
   8259 }
   8260 
   8261 static inline int
   8262 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8263 {
   8264 	struct wm_softc *sc = ifp->if_softc;
   8265 	u_int cpuid = cpu_index(curcpu());
   8266 
   8267 	/*
    8268 	 * Currently, a simple distribution strategy.
    8269 	 * TODO:
    8270 	 * distribute by flowid (RSS hash value).
   8271 	 */
   8272 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8273 }
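/*
 * Example of the mapping above (hypothetical numbers): with ncpu = 8,
 * sc_affinity_offset = 2, sc_nqueues = 4 and cpuid = 1, the result is
 * ((1 + 8 - 2) % 8) % 4 = 7 % 4 = 3, so CPUs are spread round-robin
 * over the queues starting at the affinity offset.
 */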
   8274 
   8275 static inline bool
   8276 wm_linkdown_discard(struct wm_txqueue *txq)
   8277 {
   8278 
   8279 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8280 		return true;
   8281 
   8282 	return false;
   8283 }
   8284 
   8285 /*
   8286  * wm_start:		[ifnet interface function]
   8287  *
   8288  *	Start packet transmission on the interface.
   8289  */
   8290 static void
   8291 wm_start(struct ifnet *ifp)
   8292 {
   8293 	struct wm_softc *sc = ifp->if_softc;
   8294 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8295 
   8296 #ifdef WM_MPSAFE
   8297 	KASSERT(if_is_mpsafe(ifp));
   8298 #endif
   8299 	/*
   8300 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8301 	 */
   8302 
   8303 	mutex_enter(txq->txq_lock);
   8304 	if (!txq->txq_stopping)
   8305 		wm_start_locked(ifp);
   8306 	mutex_exit(txq->txq_lock);
   8307 }
   8308 
   8309 static void
   8310 wm_start_locked(struct ifnet *ifp)
   8311 {
   8312 	struct wm_softc *sc = ifp->if_softc;
   8313 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8314 
   8315 	wm_send_common_locked(ifp, txq, false);
   8316 }
   8317 
   8318 static int
   8319 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8320 {
   8321 	int qid;
   8322 	struct wm_softc *sc = ifp->if_softc;
   8323 	struct wm_txqueue *txq;
   8324 
   8325 	qid = wm_select_txqueue(ifp, m);
   8326 	txq = &sc->sc_queue[qid].wmq_txq;
   8327 
   8328 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8329 		m_freem(m);
   8330 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8331 		return ENOBUFS;
   8332 	}
   8333 
   8334 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8335 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8336 	if (m->m_flags & M_MCAST)
   8337 		if_statinc_ref(nsr, if_omcasts);
   8338 	IF_STAT_PUTREF(ifp);
   8339 
   8340 	if (mutex_tryenter(txq->txq_lock)) {
   8341 		if (!txq->txq_stopping)
   8342 			wm_transmit_locked(ifp, txq);
   8343 		mutex_exit(txq->txq_lock);
   8344 	}
   8345 
   8346 	return 0;
   8347 }
   8348 
   8349 static void
   8350 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8351 {
   8352 
   8353 	wm_send_common_locked(ifp, txq, true);
   8354 }
   8355 
   8356 static void
   8357 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8358     bool is_transmit)
   8359 {
   8360 	struct wm_softc *sc = ifp->if_softc;
   8361 	struct mbuf *m0;
   8362 	struct wm_txsoft *txs;
   8363 	bus_dmamap_t dmamap;
   8364 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8365 	bus_addr_t curaddr;
   8366 	bus_size_t seglen, curlen;
   8367 	uint32_t cksumcmd;
   8368 	uint8_t cksumfields;
   8369 	bool remap = true;
   8370 
   8371 	KASSERT(mutex_owned(txq->txq_lock));
   8372 
   8373 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8374 		return;
   8375 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8376 		return;
   8377 
   8378 	if (__predict_false(wm_linkdown_discard(txq))) {
   8379 		do {
   8380 			if (is_transmit)
   8381 				m0 = pcq_get(txq->txq_interq);
   8382 			else
   8383 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8384 			/*
    8385 			 * Increment the success counter, as in the case
    8386 			 * where the packet is discarded by a link-down PHY.
   8387 			 */
   8388 			if (m0 != NULL) {
   8389 				if_statinc(ifp, if_opackets);
   8390 				m_freem(m0);
   8391 			}
   8392 		} while (m0 != NULL);
   8393 		return;
   8394 	}
   8395 
   8396 	/* Remember the previous number of free descriptors. */
   8397 	ofree = txq->txq_free;
   8398 
   8399 	/*
   8400 	 * Loop through the send queue, setting up transmit descriptors
   8401 	 * until we drain the queue, or use up all available transmit
   8402 	 * descriptors.
   8403 	 */
   8404 	for (;;) {
   8405 		m0 = NULL;
   8406 
   8407 		/* Get a work queue entry. */
   8408 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8409 			wm_txeof(txq, UINT_MAX);
   8410 			if (txq->txq_sfree == 0) {
   8411 				DPRINTF(sc, WM_DEBUG_TX,
   8412 				    ("%s: TX: no free job descriptors\n",
   8413 					device_xname(sc->sc_dev)));
   8414 				WM_Q_EVCNT_INCR(txq, txsstall);
   8415 				break;
   8416 			}
   8417 		}
   8418 
   8419 		/* Grab a packet off the queue. */
   8420 		if (is_transmit)
   8421 			m0 = pcq_get(txq->txq_interq);
   8422 		else
   8423 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8424 		if (m0 == NULL)
   8425 			break;
   8426 
   8427 		DPRINTF(sc, WM_DEBUG_TX,
   8428 		    ("%s: TX: have packet to transmit: %p\n",
   8429 			device_xname(sc->sc_dev), m0));
   8430 
   8431 		txs = &txq->txq_soft[txq->txq_snext];
   8432 		dmamap = txs->txs_dmamap;
   8433 
   8434 		use_tso = (m0->m_pkthdr.csum_flags &
   8435 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8436 
   8437 		/*
   8438 		 * So says the Linux driver:
   8439 		 * The controller does a simple calculation to make sure
   8440 		 * there is enough room in the FIFO before initiating the
   8441 		 * DMA for each buffer. The calc is:
   8442 		 *	4 = ceil(buffer len / MSS)
   8443 		 * To make sure we don't overrun the FIFO, adjust the max
   8444 		 * buffer len if the MSS drops.
   8445 		 */
   8446 		dmamap->dm_maxsegsz =
   8447 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8448 		    ? m0->m_pkthdr.segsz << 2
   8449 		    : WTX_MAX_LEN;
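		/*
		 * Example (assumed MSS): with m0->m_pkthdr.segsz = 1460 the
		 * cap is min(4 * 1460, WTX_MAX_LEN) bytes per DMA buffer,
		 * so ceil(buffer len / MSS) never exceeds 4 for any single
		 * buffer, which is exactly what the FIFO check above needs.
		 */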
   8450 
   8451 		/*
   8452 		 * Load the DMA map.  If this fails, the packet either
   8453 		 * didn't fit in the allotted number of segments, or we
   8454 		 * were short on resources.  For the too-many-segments
   8455 		 * case, we simply report an error and drop the packet,
   8456 		 * since we can't sanely copy a jumbo packet to a single
   8457 		 * buffer.
   8458 		 */
   8459 retry:
   8460 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8461 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8462 		if (__predict_false(error)) {
   8463 			if (error == EFBIG) {
   8464 				if (remap == true) {
   8465 					struct mbuf *m;
   8466 
   8467 					remap = false;
   8468 					m = m_defrag(m0, M_NOWAIT);
   8469 					if (m != NULL) {
   8470 						WM_Q_EVCNT_INCR(txq, defrag);
   8471 						m0 = m;
   8472 						goto retry;
   8473 					}
   8474 				}
   8475 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8476 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8477 				    "DMA segments, dropping...\n",
   8478 				    device_xname(sc->sc_dev));
   8479 				wm_dump_mbuf_chain(sc, m0);
   8480 				m_freem(m0);
   8481 				continue;
   8482 			}
   8483 			/* Short on resources, just stop for now. */
   8484 			DPRINTF(sc, WM_DEBUG_TX,
   8485 			    ("%s: TX: dmamap load failed: %d\n",
   8486 				device_xname(sc->sc_dev), error));
   8487 			break;
   8488 		}
   8489 
   8490 		segs_needed = dmamap->dm_nsegs;
   8491 		if (use_tso) {
   8492 			/* For sentinel descriptor; see below. */
   8493 			segs_needed++;
   8494 		}
   8495 
   8496 		/*
   8497 		 * Ensure we have enough descriptors free to describe
   8498 		 * the packet. Note, we always reserve one descriptor
   8499 		 * at the end of the ring due to the semantics of the
   8500 		 * TDT register, plus one more in the event we need
   8501 		 * to load offload context.
   8502 		 */
   8503 		if (segs_needed > txq->txq_free - 2) {
   8504 			/*
   8505 			 * Not enough free descriptors to transmit this
   8506 			 * packet.  We haven't committed anything yet,
   8507 			 * so just unload the DMA map, put the packet
    8508 			 * back on the queue, and punt. Notify the upper
   8509 			 * layer that there are no more slots left.
   8510 			 */
   8511 			DPRINTF(sc, WM_DEBUG_TX,
   8512 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8513 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8514 				segs_needed, txq->txq_free - 1));
   8515 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8516 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8517 			WM_Q_EVCNT_INCR(txq, txdstall);
   8518 			break;
   8519 		}
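		/*
		 * Concretely (illustrative numbers): with txq_free = 10 a
		 * packet may use at most 8 descriptors here; one slot is
		 * reserved due to the TDT semantics (a completely full ring
		 * would look empty) and one for a possible checksum context
		 * descriptor.
		 */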
   8520 
   8521 		/*
   8522 		 * Check for 82547 Tx FIFO bug. We need to do this
   8523 		 * once we know we can transmit the packet, since we
   8524 		 * do some internal FIFO space accounting here.
   8525 		 */
   8526 		if (sc->sc_type == WM_T_82547 &&
   8527 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8528 			DPRINTF(sc, WM_DEBUG_TX,
   8529 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8530 				device_xname(sc->sc_dev)));
   8531 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8532 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8533 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8534 			break;
   8535 		}
   8536 
   8537 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8538 
   8539 		DPRINTF(sc, WM_DEBUG_TX,
   8540 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8541 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8542 
   8543 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8544 
   8545 		/*
   8546 		 * Store a pointer to the packet so that we can free it
   8547 		 * later.
   8548 		 *
   8549 		 * Initially, we consider the number of descriptors the
    8550 		 * packet uses to be the number of DMA segments.  This may be
   8551 		 * incremented by 1 if we do checksum offload (a descriptor
   8552 		 * is used to set the checksum context).
   8553 		 */
   8554 		txs->txs_mbuf = m0;
   8555 		txs->txs_firstdesc = txq->txq_next;
   8556 		txs->txs_ndesc = segs_needed;
   8557 
   8558 		/* Set up offload parameters for this packet. */
   8559 		if (m0->m_pkthdr.csum_flags &
   8560 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8561 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8562 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8563 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8564 		} else {
   8565 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8566 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8567 			cksumcmd = 0;
   8568 			cksumfields = 0;
   8569 		}
   8570 
   8571 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8572 
   8573 		/* Sync the DMA map. */
   8574 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8575 		    BUS_DMASYNC_PREWRITE);
   8576 
   8577 		/* Initialize the transmit descriptor. */
   8578 		for (nexttx = txq->txq_next, seg = 0;
   8579 		     seg < dmamap->dm_nsegs; seg++) {
   8580 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8581 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8582 			     seglen != 0;
   8583 			     curaddr += curlen, seglen -= curlen,
   8584 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8585 				curlen = seglen;
   8586 
   8587 				/*
   8588 				 * So says the Linux driver:
   8589 				 * Work around for premature descriptor
   8590 				 * write-backs in TSO mode.  Append a
   8591 				 * 4-byte sentinel descriptor.
   8592 				 */
   8593 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8594 				    curlen > 8)
   8595 					curlen -= 4;
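				/*
				 * Shortening the final chunk makes this loop
				 * come around once more with seglen == 4 and
				 * emit the 4-byte sentinel descriptor: e.g. a
				 * last segment of 1448 bytes is split into
				 * descriptors of 1444 and 4 bytes.
				 */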
   8596 
   8597 				wm_set_dma_addr(
   8598 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8599 				txq->txq_descs[nexttx].wtx_cmdlen
   8600 				    = htole32(cksumcmd | curlen);
   8601 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8602 				    = 0;
   8603 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8604 				    = cksumfields;
   8605 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8606 				lasttx = nexttx;
   8607 
   8608 				DPRINTF(sc, WM_DEBUG_TX,
   8609 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8610 					"len %#04zx\n",
   8611 					device_xname(sc->sc_dev), nexttx,
   8612 					(uint64_t)curaddr, curlen));
   8613 			}
   8614 		}
   8615 
   8616 		KASSERT(lasttx != -1);
   8617 
   8618 		/*
   8619 		 * Set up the command byte on the last descriptor of
   8620 		 * the packet. If we're in the interrupt delay window,
   8621 		 * delay the interrupt.
   8622 		 */
   8623 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8624 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8625 
   8626 		/*
   8627 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8628 		 * up the descriptor to encapsulate the packet for us.
   8629 		 *
   8630 		 * This is only valid on the last descriptor of the packet.
   8631 		 */
   8632 		if (vlan_has_tag(m0)) {
   8633 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8634 			    htole32(WTX_CMD_VLE);
   8635 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8636 			    = htole16(vlan_get_tag(m0));
   8637 		}
   8638 
   8639 		txs->txs_lastdesc = lasttx;
   8640 
   8641 		DPRINTF(sc, WM_DEBUG_TX,
   8642 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8643 			device_xname(sc->sc_dev),
   8644 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8645 
   8646 		/* Sync the descriptors we're using. */
   8647 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8648 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8649 
   8650 		/* Give the packet to the chip. */
   8651 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8652 
   8653 		DPRINTF(sc, WM_DEBUG_TX,
   8654 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8655 
   8656 		DPRINTF(sc, WM_DEBUG_TX,
   8657 		    ("%s: TX: finished transmitting packet, job %d\n",
   8658 			device_xname(sc->sc_dev), txq->txq_snext));
   8659 
   8660 		/* Advance the tx pointer. */
   8661 		txq->txq_free -= txs->txs_ndesc;
   8662 		txq->txq_next = nexttx;
   8663 
   8664 		txq->txq_sfree--;
   8665 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8666 
   8667 		/* Pass the packet to any BPF listeners. */
   8668 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8669 	}
   8670 
   8671 	if (m0 != NULL) {
   8672 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8673 		WM_Q_EVCNT_INCR(txq, descdrop);
   8674 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8675 			__func__));
   8676 		m_freem(m0);
   8677 	}
   8678 
   8679 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8680 		/* No more slots; notify upper layer. */
   8681 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8682 	}
   8683 
   8684 	if (txq->txq_free != ofree) {
   8685 		/* Set a watchdog timer in case the chip flakes out. */
   8686 		txq->txq_lastsent = time_uptime;
   8687 		txq->txq_sending = true;
   8688 	}
   8689 }
   8690 
   8691 /*
   8692  * wm_nq_tx_offload:
   8693  *
   8694  *	Set up TCP/IP checksumming parameters for the
   8695  *	specified packet, for NEWQUEUE devices
   8696  */
   8697 static void
   8698 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8699     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8700 {
   8701 	struct mbuf *m0 = txs->txs_mbuf;
   8702 	uint32_t vl_len, mssidx, cmdc;
   8703 	struct ether_header *eh;
   8704 	int offset, iphl;
   8705 
   8706 	/*
   8707 	 * XXX It would be nice if the mbuf pkthdr had offset
   8708 	 * fields for the protocol headers.
   8709 	 */
   8710 	*cmdlenp = 0;
   8711 	*fieldsp = 0;
   8712 
   8713 	eh = mtod(m0, struct ether_header *);
    8714 	switch (ntohs(eh->ether_type)) {
   8715 	case ETHERTYPE_IP:
   8716 	case ETHERTYPE_IPV6:
   8717 		offset = ETHER_HDR_LEN;
   8718 		break;
   8719 
   8720 	case ETHERTYPE_VLAN:
   8721 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8722 		break;
   8723 
   8724 	default:
   8725 		/* Don't support this protocol or encapsulation. */
   8726 		*do_csum = false;
   8727 		return;
   8728 	}
   8729 	*do_csum = true;
   8730 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8731 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8732 
   8733 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8734 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8735 
   8736 	if ((m0->m_pkthdr.csum_flags &
   8737 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8738 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8739 	} else {
   8740 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8741 	}
   8742 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8743 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
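	/*
	 * Worked example (assumed header sizes): offset = 14 (no VLAN) and
	 * iphl = 20 pack into vl_len as (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT); the KASSERTs above merely check
	 * that both values fit in their fields.
	 */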
   8744 
   8745 	if (vlan_has_tag(m0)) {
   8746 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8747 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8748 		*cmdlenp |= NQTX_CMD_VLE;
   8749 	}
   8750 
   8751 	mssidx = 0;
   8752 
   8753 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8754 		int hlen = offset + iphl;
   8755 		int tcp_hlen;
   8756 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8757 
   8758 		if (__predict_false(m0->m_len <
   8759 				    (hlen + sizeof(struct tcphdr)))) {
   8760 			/*
   8761 			 * TCP/IP headers are not in the first mbuf; we need
   8762 			 * to do this the slow and painful way. Let's just
   8763 			 * hope this doesn't happen very often.
   8764 			 */
   8765 			struct tcphdr th;
   8766 
   8767 			WM_Q_EVCNT_INCR(txq, tsopain);
   8768 
   8769 			m_copydata(m0, hlen, sizeof(th), &th);
   8770 			if (v4) {
   8771 				struct ip ip;
   8772 
   8773 				m_copydata(m0, offset, sizeof(ip), &ip);
   8774 				ip.ip_len = 0;
   8775 				m_copyback(m0,
   8776 				    offset + offsetof(struct ip, ip_len),
   8777 				    sizeof(ip.ip_len), &ip.ip_len);
   8778 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8779 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8780 			} else {
   8781 				struct ip6_hdr ip6;
   8782 
   8783 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8784 				ip6.ip6_plen = 0;
   8785 				m_copyback(m0,
   8786 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8787 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8788 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8789 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8790 			}
   8791 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8792 			    sizeof(th.th_sum), &th.th_sum);
   8793 
   8794 			tcp_hlen = th.th_off << 2;
   8795 		} else {
   8796 			/*
   8797 			 * TCP/IP headers are in the first mbuf; we can do
   8798 			 * this the easy way.
   8799 			 */
   8800 			struct tcphdr *th;
   8801 
   8802 			if (v4) {
   8803 				struct ip *ip =
   8804 				    (void *)(mtod(m0, char *) + offset);
   8805 				th = (void *)(mtod(m0, char *) + hlen);
   8806 
   8807 				ip->ip_len = 0;
   8808 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8809 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8810 			} else {
   8811 				struct ip6_hdr *ip6 =
   8812 				    (void *)(mtod(m0, char *) + offset);
   8813 				th = (void *)(mtod(m0, char *) + hlen);
   8814 
   8815 				ip6->ip6_plen = 0;
   8816 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8817 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8818 			}
   8819 			tcp_hlen = th->th_off << 2;
   8820 		}
   8821 		hlen += tcp_hlen;
   8822 		*cmdlenp |= NQTX_CMD_TSE;
   8823 
   8824 		if (v4) {
   8825 			WM_Q_EVCNT_INCR(txq, tso);
   8826 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8827 		} else {
   8828 			WM_Q_EVCNT_INCR(txq, tso6);
   8829 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8830 		}
   8831 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8832 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8833 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8834 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8835 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8836 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8837 	} else {
   8838 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8839 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8840 	}
   8841 
   8842 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8843 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8844 		cmdc |= NQTXC_CMD_IP4;
   8845 	}
   8846 
   8847 	if (m0->m_pkthdr.csum_flags &
   8848 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8849 		WM_Q_EVCNT_INCR(txq, tusum);
   8850 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8851 			cmdc |= NQTXC_CMD_TCP;
   8852 		else
   8853 			cmdc |= NQTXC_CMD_UDP;
   8854 
   8855 		cmdc |= NQTXC_CMD_IP4;
   8856 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8857 	}
   8858 	if (m0->m_pkthdr.csum_flags &
   8859 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8860 		WM_Q_EVCNT_INCR(txq, tusum6);
   8861 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8862 			cmdc |= NQTXC_CMD_TCP;
   8863 		else
   8864 			cmdc |= NQTXC_CMD_UDP;
   8865 
   8866 		cmdc |= NQTXC_CMD_IP6;
   8867 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8868 	}
   8869 
   8870 	/*
    8871 	 * We don't have to write a context descriptor for every packet on
    8872 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8873 	 * I210 and I211. It is enough to write one once per Tx queue for
    8874 	 * these controllers.
    8875 	 * Writing a context descriptor for every packet adds overhead,
    8876 	 * but it does not cause problems.
   8877 	 */
   8878 	/* Fill in the context descriptor. */
   8879 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8880 	    htole32(vl_len);
   8881 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8882 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8883 	    htole32(cmdc);
   8884 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8885 	    htole32(mssidx);
   8886 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8887 	DPRINTF(sc, WM_DEBUG_TX,
   8888 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8889 		txq->txq_next, 0, vl_len));
   8890 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8891 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8892 	txs->txs_ndesc++;
   8893 }
   8894 
   8895 /*
   8896  * wm_nq_start:		[ifnet interface function]
   8897  *
   8898  *	Start packet transmission on the interface for NEWQUEUE devices
   8899  */
   8900 static void
   8901 wm_nq_start(struct ifnet *ifp)
   8902 {
   8903 	struct wm_softc *sc = ifp->if_softc;
   8904 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8905 
   8906 #ifdef WM_MPSAFE
   8907 	KASSERT(if_is_mpsafe(ifp));
   8908 #endif
   8909 	/*
   8910 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8911 	 */
   8912 
   8913 	mutex_enter(txq->txq_lock);
   8914 	if (!txq->txq_stopping)
   8915 		wm_nq_start_locked(ifp);
   8916 	mutex_exit(txq->txq_lock);
   8917 }
   8918 
   8919 static void
   8920 wm_nq_start_locked(struct ifnet *ifp)
   8921 {
   8922 	struct wm_softc *sc = ifp->if_softc;
   8923 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8924 
   8925 	wm_nq_send_common_locked(ifp, txq, false);
   8926 }
   8927 
   8928 static int
   8929 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8930 {
   8931 	int qid;
   8932 	struct wm_softc *sc = ifp->if_softc;
   8933 	struct wm_txqueue *txq;
   8934 
   8935 	qid = wm_select_txqueue(ifp, m);
   8936 	txq = &sc->sc_queue[qid].wmq_txq;
   8937 
   8938 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8939 		m_freem(m);
   8940 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8941 		return ENOBUFS;
   8942 	}
   8943 
   8944 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8945 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8946 	if (m->m_flags & M_MCAST)
   8947 		if_statinc_ref(nsr, if_omcasts);
   8948 	IF_STAT_PUTREF(ifp);
   8949 
   8950 	/*
    8951 	 * There are two situations in which this mutex_tryenter() can
    8952 	 * fail at run time:
    8953 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8954 	 *     (2) contention with the deferred if_start softint
    8955 	 *         (wm_handle_queue())
    8956 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8957 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8958 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8959 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck either.
   8960 	 */
   8961 	if (mutex_tryenter(txq->txq_lock)) {
   8962 		if (!txq->txq_stopping)
   8963 			wm_nq_transmit_locked(ifp, txq);
   8964 		mutex_exit(txq->txq_lock);
   8965 	}
   8966 
   8967 	return 0;
   8968 }
   8969 
   8970 static void
   8971 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8972 {
   8973 
   8974 	wm_nq_send_common_locked(ifp, txq, true);
   8975 }
   8976 
   8977 static void
   8978 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8979     bool is_transmit)
   8980 {
   8981 	struct wm_softc *sc = ifp->if_softc;
   8982 	struct mbuf *m0;
   8983 	struct wm_txsoft *txs;
   8984 	bus_dmamap_t dmamap;
   8985 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8986 	bool do_csum, sent;
   8987 	bool remap = true;
   8988 
   8989 	KASSERT(mutex_owned(txq->txq_lock));
   8990 
   8991 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8992 		return;
   8993 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8994 		return;
   8995 
   8996 	if (__predict_false(wm_linkdown_discard(txq))) {
   8997 		do {
   8998 			if (is_transmit)
   8999 				m0 = pcq_get(txq->txq_interq);
   9000 			else
   9001 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9002 			/*
    9003 			 * Increment the success counter, as in the case
    9004 			 * where the packet is discarded by a link-down PHY.
   9005 			 */
   9006 			if (m0 != NULL) {
   9007 				if_statinc(ifp, if_opackets);
   9008 				m_freem(m0);
   9009 			}
   9010 		} while (m0 != NULL);
   9011 		return;
   9012 	}
   9013 
   9014 	sent = false;
   9015 
   9016 	/*
   9017 	 * Loop through the send queue, setting up transmit descriptors
   9018 	 * until we drain the queue, or use up all available transmit
   9019 	 * descriptors.
   9020 	 */
   9021 	for (;;) {
   9022 		m0 = NULL;
   9023 
   9024 		/* Get a work queue entry. */
   9025 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9026 			wm_txeof(txq, UINT_MAX);
   9027 			if (txq->txq_sfree == 0) {
   9028 				DPRINTF(sc, WM_DEBUG_TX,
   9029 				    ("%s: TX: no free job descriptors\n",
   9030 					device_xname(sc->sc_dev)));
   9031 				WM_Q_EVCNT_INCR(txq, txsstall);
   9032 				break;
   9033 			}
   9034 		}
   9035 
   9036 		/* Grab a packet off the queue. */
   9037 		if (is_transmit)
   9038 			m0 = pcq_get(txq->txq_interq);
   9039 		else
   9040 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9041 		if (m0 == NULL)
   9042 			break;
   9043 
   9044 		DPRINTF(sc, WM_DEBUG_TX,
   9045 		    ("%s: TX: have packet to transmit: %p\n",
   9046 		    device_xname(sc->sc_dev), m0));
   9047 
   9048 		txs = &txq->txq_soft[txq->txq_snext];
   9049 		dmamap = txs->txs_dmamap;
   9050 
   9051 		/*
   9052 		 * Load the DMA map.  If this fails, the packet either
   9053 		 * didn't fit in the allotted number of segments, or we
   9054 		 * were short on resources.  For the too-many-segments
   9055 		 * case, we simply report an error and drop the packet,
   9056 		 * since we can't sanely copy a jumbo packet to a single
   9057 		 * buffer.
   9058 		 */
   9059 retry:
   9060 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9061 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9062 		if (__predict_false(error)) {
   9063 			if (error == EFBIG) {
   9064 				if (remap == true) {
   9065 					struct mbuf *m;
   9066 
   9067 					remap = false;
   9068 					m = m_defrag(m0, M_NOWAIT);
   9069 					if (m != NULL) {
   9070 						WM_Q_EVCNT_INCR(txq, defrag);
   9071 						m0 = m;
   9072 						goto retry;
   9073 					}
   9074 				}
   9075 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9076 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9077 				    "DMA segments, dropping...\n",
   9078 				    device_xname(sc->sc_dev));
   9079 				wm_dump_mbuf_chain(sc, m0);
   9080 				m_freem(m0);
   9081 				continue;
   9082 			}
   9083 			/* Short on resources, just stop for now. */
   9084 			DPRINTF(sc, WM_DEBUG_TX,
   9085 			    ("%s: TX: dmamap load failed: %d\n",
   9086 				device_xname(sc->sc_dev), error));
   9087 			break;
   9088 		}
   9089 
   9090 		segs_needed = dmamap->dm_nsegs;
   9091 
   9092 		/*
   9093 		 * Ensure we have enough descriptors free to describe
   9094 		 * the packet. Note, we always reserve one descriptor
   9095 		 * at the end of the ring due to the semantics of the
   9096 		 * TDT register, plus one more in the event we need
   9097 		 * to load offload context.
   9098 		 */
   9099 		if (segs_needed > txq->txq_free - 2) {
   9100 			/*
   9101 			 * Not enough free descriptors to transmit this
   9102 			 * packet.  We haven't committed anything yet,
   9103 			 * so just unload the DMA map, put the packet
    9104 			 * back on the queue, and punt. Notify the upper
   9105 			 * layer that there are no more slots left.
   9106 			 */
   9107 			DPRINTF(sc, WM_DEBUG_TX,
   9108 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9109 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9110 				segs_needed, txq->txq_free - 1));
   9111 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9112 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9113 			WM_Q_EVCNT_INCR(txq, txdstall);
   9114 			break;
   9115 		}
   9116 
   9117 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9118 
   9119 		DPRINTF(sc, WM_DEBUG_TX,
   9120 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9121 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9122 
   9123 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9124 
   9125 		/*
   9126 		 * Store a pointer to the packet so that we can free it
   9127 		 * later.
   9128 		 *
   9129 		 * Initially, we consider the number of descriptors the
    9130 		 * packet uses to be the number of DMA segments.  This may be
   9131 		 * incremented by 1 if we do checksum offload (a descriptor
   9132 		 * is used to set the checksum context).
   9133 		 */
   9134 		txs->txs_mbuf = m0;
   9135 		txs->txs_firstdesc = txq->txq_next;
   9136 		txs->txs_ndesc = segs_needed;
   9137 
   9138 		/* Set up offload parameters for this packet. */
   9139 		uint32_t cmdlen, fields, dcmdlen;
   9140 		if (m0->m_pkthdr.csum_flags &
   9141 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9142 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9143 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9144 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9145 			    &do_csum);
   9146 		} else {
   9147 			do_csum = false;
   9148 			cmdlen = 0;
   9149 			fields = 0;
   9150 		}
   9151 
   9152 		/* Sync the DMA map. */
   9153 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9154 		    BUS_DMASYNC_PREWRITE);
   9155 
   9156 		/* Initialize the first transmit descriptor. */
   9157 		nexttx = txq->txq_next;
   9158 		if (!do_csum) {
   9159 			/* Set up a legacy descriptor */
   9160 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9161 			    dmamap->dm_segs[0].ds_addr);
   9162 			txq->txq_descs[nexttx].wtx_cmdlen =
   9163 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9164 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9165 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9166 			if (vlan_has_tag(m0)) {
   9167 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9168 				    htole32(WTX_CMD_VLE);
   9169 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9170 				    htole16(vlan_get_tag(m0));
   9171 			} else
   9172 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9173 
   9174 			dcmdlen = 0;
   9175 		} else {
   9176 			/* Set up an advanced data descriptor */
   9177 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9178 			    htole64(dmamap->dm_segs[0].ds_addr);
   9179 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9180 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9181 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9182 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9183 			    htole32(fields);
   9184 			DPRINTF(sc, WM_DEBUG_TX,
   9185 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9186 				device_xname(sc->sc_dev), nexttx,
   9187 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9188 			DPRINTF(sc, WM_DEBUG_TX,
   9189 			    ("\t 0x%08x%08x\n", fields,
   9190 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9191 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9192 		}
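		/*
		 * dcmdlen seeds the cmdlen word of the remaining segment
		 * descriptors below: 0 leaves them as plain legacy
		 * length-only descriptors, while NQTX_DTYP_D | NQTX_CMD_DEXT
		 * marks them as advanced data descriptors.  The KASSERT in
		 * the loop checks that a segment length cannot collide with
		 * those flag bits.
		 */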
   9193 
   9194 		lasttx = nexttx;
   9195 		nexttx = WM_NEXTTX(txq, nexttx);
   9196 		/*
   9197 		 * Fill in the next descriptors. Legacy or advanced format
   9198 		 * is the same here.
   9199 		 */
   9200 		for (seg = 1; seg < dmamap->dm_nsegs;
   9201 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9202 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9203 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9204 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9205 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9206 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9207 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9208 			lasttx = nexttx;
   9209 
   9210 			DPRINTF(sc, WM_DEBUG_TX,
   9211 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9212 				device_xname(sc->sc_dev), nexttx,
   9213 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9214 				dmamap->dm_segs[seg].ds_len));
   9215 		}
   9216 
   9217 		KASSERT(lasttx != -1);
   9218 
   9219 		/*
   9220 		 * Set up the command byte on the last descriptor of
   9221 		 * the packet. If we're in the interrupt delay window,
   9222 		 * delay the interrupt.
   9223 		 */
   9224 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9225 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9226 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9227 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9228 
   9229 		txs->txs_lastdesc = lasttx;
   9230 
   9231 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9232 		    device_xname(sc->sc_dev),
   9233 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9234 
   9235 		/* Sync the descriptors we're using. */
   9236 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9237 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9238 
   9239 		/* Give the packet to the chip. */
   9240 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9241 		sent = true;
   9242 
   9243 		DPRINTF(sc, WM_DEBUG_TX,
   9244 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9245 
   9246 		DPRINTF(sc, WM_DEBUG_TX,
   9247 		    ("%s: TX: finished transmitting packet, job %d\n",
   9248 			device_xname(sc->sc_dev), txq->txq_snext));
   9249 
   9250 		/* Advance the tx pointer. */
   9251 		txq->txq_free -= txs->txs_ndesc;
   9252 		txq->txq_next = nexttx;
   9253 
   9254 		txq->txq_sfree--;
   9255 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9256 
   9257 		/* Pass the packet to any BPF listeners. */
   9258 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9259 	}
   9260 
   9261 	if (m0 != NULL) {
   9262 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9263 		WM_Q_EVCNT_INCR(txq, descdrop);
   9264 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9265 			__func__));
   9266 		m_freem(m0);
   9267 	}
   9268 
   9269 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9270 		/* No more slots; notify upper layer. */
   9271 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9272 	}
   9273 
   9274 	if (sent) {
   9275 		/* Set a watchdog timer in case the chip flakes out. */
   9276 		txq->txq_lastsent = time_uptime;
   9277 		txq->txq_sending = true;
   9278 	}
   9279 }
   9280 
   9281 static void
   9282 wm_deferred_start_locked(struct wm_txqueue *txq)
   9283 {
   9284 	struct wm_softc *sc = txq->txq_sc;
   9285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9286 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9287 	int qid = wmq->wmq_id;
   9288 
   9289 	KASSERT(mutex_owned(txq->txq_lock));
   9290 	KASSERT(!txq->txq_stopping);
   9291 
   9292 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9293 		/* XXX needed for ALTQ or single-CPU systems */
   9294 		if (qid == 0)
   9295 			wm_nq_start_locked(ifp);
   9296 		wm_nq_transmit_locked(ifp, txq);
   9297 	} else {
    9298 		/* XXX needed for ALTQ or single-CPU systems */
   9299 		if (qid == 0)
   9300 			wm_start_locked(ifp);
   9301 		wm_transmit_locked(ifp, txq);
   9302 	}
   9303 }
   9304 
   9305 /* Interrupt */
   9306 
   9307 /*
   9308  * wm_txeof:
   9309  *
   9310  *	Helper; handle transmit interrupts.
   9311  */
   9312 static bool
   9313 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9314 {
   9315 	struct wm_softc *sc = txq->txq_sc;
   9316 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9317 	struct wm_txsoft *txs;
   9318 	int count = 0;
   9319 	int i;
   9320 	uint8_t status;
   9321 	bool more = false;
   9322 
   9323 	KASSERT(mutex_owned(txq->txq_lock));
   9324 
   9325 	if (txq->txq_stopping)
   9326 		return false;
   9327 
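         	/*
         	 * We are about to reclaim completed descriptors, so clear the
         	 * no-space flag; the transmit path will set it again if it
         	 * runs out of descriptors.
         	 */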
   9328 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9329 
   9330 	/*
   9331 	 * Go through the Tx list and free mbufs for those
   9332 	 * frames which have been transmitted.
   9333 	 */
   9334 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9335 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9336 		txs = &txq->txq_soft[i];
   9337 
   9338 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9339 			device_xname(sc->sc_dev), i));
   9340 
   9341 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9342 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9343 
   9344 		status =
   9345 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9346 		if ((status & WTX_ST_DD) == 0) {
   9347 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9348 			    BUS_DMASYNC_PREREAD);
   9349 			break;
   9350 		}
   9351 
   9352 		if (limit-- == 0) {
   9353 			more = true;
   9354 			DPRINTF(sc, WM_DEBUG_TX,
   9355 			    ("%s: TX: loop limited, job %d is not processed\n",
   9356 				device_xname(sc->sc_dev), i));
   9357 			break;
   9358 		}
   9359 
   9360 		count++;
   9361 		DPRINTF(sc, WM_DEBUG_TX,
   9362 		    ("%s: TX: job %d done: descs %d..%d\n",
   9363 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9364 		    txs->txs_lastdesc));
   9365 
   9366 		/*
   9367 		 * XXX We should probably be using the statistics
   9368 		 * XXX registers, but I don't know if they exist
   9369 		 * XXX on chips before the i82544.
   9370 		 */
   9371 
   9372 #ifdef WM_EVENT_COUNTERS
   9373 		if (status & WTX_ST_TU)
   9374 			WM_Q_EVCNT_INCR(txq, underrun);
   9375 #endif /* WM_EVENT_COUNTERS */
   9376 
   9377 		/*
    9378 		 * The datasheets for the 82574 and newer say the status field
    9379 		 * has neither the EC (Excessive Collision) bit nor the LC
    9380 		 * (Late Collision) bit (both are reserved). Refer to the
    9381 		 * "PCIe GbE Controller Open Source Software Developer's
    9382 		 * Manual", the 82574 datasheet and newer ones.
    9383 		 *
    9384 		 * XXX I saw the LC bit set on an I218 even though the media was
    9385 		 * full duplex, so the bit might have another, undocumented meaning.
   9386 		 */
   9387 
   9388 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9389 		    && ((sc->sc_type < WM_T_82574)
   9390 			|| (sc->sc_type == WM_T_80003))) {
   9391 			if_statinc(ifp, if_oerrors);
   9392 			if (status & WTX_ST_LC)
   9393 				log(LOG_WARNING, "%s: late collision\n",
   9394 				    device_xname(sc->sc_dev));
   9395 			else if (status & WTX_ST_EC) {
   9396 				if_statadd(ifp, if_collisions,
   9397 				    TX_COLLISION_THRESHOLD + 1);
   9398 				log(LOG_WARNING, "%s: excessive collisions\n",
   9399 				    device_xname(sc->sc_dev));
   9400 			}
   9401 		} else
   9402 			if_statinc(ifp, if_opackets);
   9403 
   9404 		txq->txq_packets++;
   9405 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9406 
   9407 		txq->txq_free += txs->txs_ndesc;
   9408 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9409 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9410 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9411 		m_freem(txs->txs_mbuf);
   9412 		txs->txs_mbuf = NULL;
   9413 	}
   9414 
   9415 	/* Update the dirty transmit buffer pointer. */
   9416 	txq->txq_sdirty = i;
   9417 	DPRINTF(sc, WM_DEBUG_TX,
   9418 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9419 
   9420 	if (count != 0)
   9421 		rnd_add_uint32(&sc->rnd_source, count);
   9422 
   9423 	/*
   9424 	 * If there are no more pending transmissions, cancel the watchdog
   9425 	 * timer.
   9426 	 */
   9427 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9428 		txq->txq_sending = false;
   9429 
   9430 	return more;
   9431 }
   9432 
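         /*
          * Receive descriptor accessors.
          *
          * The driver handles three receive descriptor layouts: the legacy
          * format (wrx_*), the 82574 extended format (erx_*) and the newqueue
          * advanced format (nqrx_*) on 82575 and newer. The helpers below
          * hide the layout differences from wm_rxeof() by dispatching on
          * sc_type and the WM_F_NEWQUEUE flag.
          */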
   9433 static inline uint32_t
   9434 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9435 {
   9436 	struct wm_softc *sc = rxq->rxq_sc;
   9437 
   9438 	if (sc->sc_type == WM_T_82574)
   9439 		return EXTRXC_STATUS(
   9440 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9441 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9442 		return NQRXC_STATUS(
   9443 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9444 	else
   9445 		return rxq->rxq_descs[idx].wrx_status;
   9446 }
   9447 
   9448 static inline uint32_t
   9449 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9450 {
   9451 	struct wm_softc *sc = rxq->rxq_sc;
   9452 
   9453 	if (sc->sc_type == WM_T_82574)
   9454 		return EXTRXC_ERROR(
   9455 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9456 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9457 		return NQRXC_ERROR(
   9458 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9459 	else
   9460 		return rxq->rxq_descs[idx].wrx_errors;
   9461 }
   9462 
   9463 static inline uint16_t
   9464 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9465 {
   9466 	struct wm_softc *sc = rxq->rxq_sc;
   9467 
   9468 	if (sc->sc_type == WM_T_82574)
   9469 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9470 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9471 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9472 	else
   9473 		return rxq->rxq_descs[idx].wrx_special;
   9474 }
   9475 
   9476 static inline int
   9477 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9478 {
   9479 	struct wm_softc *sc = rxq->rxq_sc;
   9480 
   9481 	if (sc->sc_type == WM_T_82574)
   9482 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9483 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9484 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9485 	else
   9486 		return rxq->rxq_descs[idx].wrx_len;
   9487 }
   9488 
   9489 #ifdef WM_DEBUG
   9490 static inline uint32_t
   9491 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9492 {
   9493 	struct wm_softc *sc = rxq->rxq_sc;
   9494 
   9495 	if (sc->sc_type == WM_T_82574)
   9496 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9497 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9498 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9499 	else
   9500 		return 0;
   9501 }
   9502 
   9503 static inline uint8_t
   9504 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9505 {
   9506 	struct wm_softc *sc = rxq->rxq_sc;
   9507 
   9508 	if (sc->sc_type == WM_T_82574)
   9509 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9510 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9511 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9512 	else
   9513 		return 0;
   9514 }
   9515 #endif /* WM_DEBUG */
   9516 
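         /*
          * Check a status or error bit, choosing the proper bit definition
          * for the legacy, 82574 extended or newqueue descriptor format.
          */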
   9517 static inline bool
   9518 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9519     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9520 {
   9521 
   9522 	if (sc->sc_type == WM_T_82574)
   9523 		return (status & ext_bit) != 0;
   9524 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9525 		return (status & nq_bit) != 0;
   9526 	else
   9527 		return (status & legacy_bit) != 0;
   9528 }
   9529 
   9530 static inline bool
   9531 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9532     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9533 {
   9534 
   9535 	if (sc->sc_type == WM_T_82574)
   9536 		return (error & ext_bit) != 0;
   9537 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9538 		return (error & nq_bit) != 0;
   9539 	else
   9540 		return (error & legacy_bit) != 0;
   9541 }
   9542 
   9543 static inline bool
   9544 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9545 {
   9546 
    9547 	return wm_rxdesc_is_set_status(rxq->rxq_sc, status,
    9548 	    WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP);
   9552 }
   9553 
   9554 static inline bool
   9555 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9556 {
   9557 	struct wm_softc *sc = rxq->rxq_sc;
   9558 
   9559 	/* XXX missing error bit for newqueue? */
   9560 	if (wm_rxdesc_is_set_error(sc, errors,
   9561 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9562 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9563 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9564 		NQRXC_ERROR_RXE)) {
   9565 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9566 		    EXTRXC_ERROR_SE, 0))
   9567 			log(LOG_WARNING, "%s: symbol error\n",
   9568 			    device_xname(sc->sc_dev));
   9569 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9570 		    EXTRXC_ERROR_SEQ, 0))
   9571 			log(LOG_WARNING, "%s: receive sequence error\n",
   9572 			    device_xname(sc->sc_dev));
   9573 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9574 		    EXTRXC_ERROR_CE, 0))
   9575 			log(LOG_WARNING, "%s: CRC error\n",
   9576 			    device_xname(sc->sc_dev));
   9577 		return true;
   9578 	}
   9579 
   9580 	return false;
   9581 }
   9582 
   9583 static inline bool
   9584 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9585 {
   9586 	struct wm_softc *sc = rxq->rxq_sc;
   9587 
   9588 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9589 		NQRXC_STATUS_DD)) {
   9590 		/* We have processed all of the receive descriptors. */
   9591 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9592 		return false;
   9593 	}
   9594 
   9595 	return true;
   9596 }
   9597 
   9598 static inline bool
   9599 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9600     uint16_t vlantag, struct mbuf *m)
   9601 {
   9602 
   9603 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9604 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9605 		vlan_set_tag(m, le16toh(vlantag));
   9606 	}
   9607 
   9608 	return true;
   9609 }
   9610 
   9611 static inline void
   9612 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9613     uint32_t errors, struct mbuf *m)
   9614 {
   9615 	struct wm_softc *sc = rxq->rxq_sc;
   9616 
   9617 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9618 		if (wm_rxdesc_is_set_status(sc, status,
   9619 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9620 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9621 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9622 			if (wm_rxdesc_is_set_error(sc, errors,
   9623 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9624 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9625 		}
   9626 		if (wm_rxdesc_is_set_status(sc, status,
   9627 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9628 			/*
   9629 			 * Note: we don't know if this was TCP or UDP,
   9630 			 * so we just set both bits, and expect the
   9631 			 * upper layers to deal.
   9632 			 */
   9633 			WM_Q_EVCNT_INCR(rxq, tusum);
   9634 			m->m_pkthdr.csum_flags |=
   9635 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9636 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9637 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9638 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9639 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9640 		}
   9641 	}
   9642 }
   9643 
   9644 /*
   9645  * wm_rxeof:
   9646  *
   9647  *	Helper; handle receive interrupts.
   9648  */
   9649 static bool
   9650 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9651 {
   9652 	struct wm_softc *sc = rxq->rxq_sc;
   9653 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9654 	struct wm_rxsoft *rxs;
   9655 	struct mbuf *m;
   9656 	int i, len;
   9657 	int count = 0;
   9658 	uint32_t status, errors;
   9659 	uint16_t vlantag;
   9660 	bool more = false;
   9661 
   9662 	KASSERT(mutex_owned(rxq->rxq_lock));
   9663 
   9664 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9665 		rxs = &rxq->rxq_soft[i];
   9666 
   9667 		DPRINTF(sc, WM_DEBUG_RX,
   9668 		    ("%s: RX: checking descriptor %d\n",
   9669 			device_xname(sc->sc_dev), i));
   9670 		wm_cdrxsync(rxq, i,
   9671 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9672 
   9673 		status = wm_rxdesc_get_status(rxq, i);
   9674 		errors = wm_rxdesc_get_errors(rxq, i);
   9675 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9676 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9677 #ifdef WM_DEBUG
   9678 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9679 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9680 #endif
   9681 
   9682 		if (!wm_rxdesc_dd(rxq, i, status))
   9683 			break;
   9684 
   9685 		if (limit-- == 0) {
   9686 			more = true;
   9687 			DPRINTF(sc, WM_DEBUG_RX,
   9688 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9689 				device_xname(sc->sc_dev), i));
   9690 			break;
   9691 		}
   9692 
   9693 		count++;
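         		/*
         		 * If an earlier fragment of this packet could not get a
         		 * replacement buffer, keep discarding descriptors until
         		 * we see EOP.
         		 */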
   9694 		if (__predict_false(rxq->rxq_discard)) {
   9695 			DPRINTF(sc, WM_DEBUG_RX,
   9696 			    ("%s: RX: discarding contents of descriptor %d\n",
   9697 				device_xname(sc->sc_dev), i));
   9698 			wm_init_rxdesc(rxq, i);
   9699 			if (wm_rxdesc_is_eop(rxq, status)) {
   9700 				/* Reset our state. */
   9701 				DPRINTF(sc, WM_DEBUG_RX,
   9702 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9703 					device_xname(sc->sc_dev)));
   9704 				rxq->rxq_discard = 0;
   9705 			}
   9706 			continue;
   9707 		}
   9708 
   9709 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9710 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9711 
   9712 		m = rxs->rxs_mbuf;
   9713 
   9714 		/*
   9715 		 * Add a new receive buffer to the ring, unless of
   9716 		 * course the length is zero. Treat the latter as a
   9717 		 * failed mapping.
   9718 		 */
   9719 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9720 			/*
   9721 			 * Failed, throw away what we've done so
   9722 			 * far, and discard the rest of the packet.
   9723 			 */
   9724 			if_statinc(ifp, if_ierrors);
   9725 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9726 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9727 			wm_init_rxdesc(rxq, i);
   9728 			if (!wm_rxdesc_is_eop(rxq, status))
   9729 				rxq->rxq_discard = 1;
   9730 			if (rxq->rxq_head != NULL)
   9731 				m_freem(rxq->rxq_head);
   9732 			WM_RXCHAIN_RESET(rxq);
   9733 			DPRINTF(sc, WM_DEBUG_RX,
   9734 			    ("%s: RX: Rx buffer allocation failed, "
   9735 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9736 				rxq->rxq_discard ? " (discard)" : ""));
   9737 			continue;
   9738 		}
   9739 
   9740 		m->m_len = len;
   9741 		rxq->rxq_len += len;
   9742 		DPRINTF(sc, WM_DEBUG_RX,
   9743 		    ("%s: RX: buffer at %p len %d\n",
   9744 			device_xname(sc->sc_dev), m->m_data, len));
   9745 
   9746 		/* If this is not the end of the packet, keep looking. */
   9747 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9748 			WM_RXCHAIN_LINK(rxq, m);
   9749 			DPRINTF(sc, WM_DEBUG_RX,
   9750 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9751 				device_xname(sc->sc_dev), rxq->rxq_len));
   9752 			continue;
   9753 		}
   9754 
   9755 		/*
    9756 		 * Okay, we have the entire packet now. The chip is
    9757 		 * configured to include the FCS except on the I35[04] and
    9758 		 * I21[01] (not all chips can be configured to strip it),
    9759 		 * so we need to trim it. Those chips have an erratum: the
    9760 		 * RCTL_SECRC bit in the RCTL register is always set, so we
    9761 		 * don't trim on them. PCH2 and newer chips also do not
    9762 		 * include the FCS when jumbo frames are used, to work
    9763 		 * around an erratum. We may need to adjust the length of the
    9764 		 * previous mbuf in the chain if the current mbuf is too short.
   9765 		 */
   9766 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9767 			if (m->m_len < ETHER_CRC_LEN) {
   9768 				rxq->rxq_tail->m_len
   9769 				    -= (ETHER_CRC_LEN - m->m_len);
   9770 				m->m_len = 0;
   9771 			} else
   9772 				m->m_len -= ETHER_CRC_LEN;
   9773 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9774 		} else
   9775 			len = rxq->rxq_len;
   9776 
   9777 		WM_RXCHAIN_LINK(rxq, m);
   9778 
   9779 		*rxq->rxq_tailp = NULL;
   9780 		m = rxq->rxq_head;
   9781 
   9782 		WM_RXCHAIN_RESET(rxq);
   9783 
   9784 		DPRINTF(sc, WM_DEBUG_RX,
   9785 		    ("%s: RX: have entire packet, len -> %d\n",
   9786 			device_xname(sc->sc_dev), len));
   9787 
   9788 		/* If an error occurred, update stats and drop the packet. */
   9789 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9790 			m_freem(m);
   9791 			continue;
   9792 		}
   9793 
   9794 		/* No errors.  Receive the packet. */
   9795 		m_set_rcvif(m, ifp);
   9796 		m->m_pkthdr.len = len;
   9797 		/*
   9798 		 * TODO
    9799 		 * We should save the rsshash and rsstype in this mbuf.
   9800 		 */
   9801 		DPRINTF(sc, WM_DEBUG_RX,
   9802 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9803 			device_xname(sc->sc_dev), rsstype, rsshash));
   9804 
   9805 		/*
   9806 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9807 		 * for us.  Associate the tag with the packet.
   9808 		 */
   9809 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9810 			continue;
   9811 
   9812 		/* Set up checksum info for this packet. */
   9813 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9814 
   9815 		rxq->rxq_packets++;
   9816 		rxq->rxq_bytes += len;
   9817 		/* Pass it on. */
   9818 		if_percpuq_enqueue(sc->sc_ipq, m);
   9819 
   9820 		if (rxq->rxq_stopping)
   9821 			break;
   9822 	}
   9823 	rxq->rxq_ptr = i;
   9824 
   9825 	if (count != 0)
   9826 		rnd_add_uint32(&sc->rnd_source, count);
   9827 
   9828 	DPRINTF(sc, WM_DEBUG_RX,
   9829 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9830 
   9831 	return more;
   9832 }
   9833 
   9834 /*
   9835  * wm_linkintr_gmii:
   9836  *
   9837  *	Helper; handle link interrupts for GMII.
   9838  */
   9839 static void
   9840 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9841 {
   9842 	device_t dev = sc->sc_dev;
   9843 	uint32_t status, reg;
   9844 	bool link;
   9845 	int rv;
   9846 
   9847 	KASSERT(WM_CORE_LOCKED(sc));
   9848 
   9849 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9850 		__func__));
   9851 
   9852 	if ((icr & ICR_LSC) == 0) {
   9853 		if (icr & ICR_RXSEQ)
   9854 			DPRINTF(sc, WM_DEBUG_LINK,
    9855 			    ("%s: LINK: Receive sequence error\n",
   9856 				device_xname(dev)));
   9857 		return;
   9858 	}
   9859 
   9860 	/* Link status changed */
   9861 	status = CSR_READ(sc, WMREG_STATUS);
   9862 	link = status & STATUS_LU;
   9863 	if (link) {
   9864 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9865 			device_xname(dev),
   9866 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9867 		if (wm_phy_need_linkdown_discard(sc)) {
   9868 			DPRINTF(sc, WM_DEBUG_LINK,
   9869 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9870 				device_xname(dev)));
   9871 			wm_clear_linkdown_discard(sc);
   9872 		}
   9873 	} else {
   9874 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9875 			device_xname(dev)));
   9876 		if (wm_phy_need_linkdown_discard(sc)) {
   9877 			DPRINTF(sc, WM_DEBUG_LINK,
   9878 			    ("%s: linkintr: Set linkdown discard flag\n",
   9879 				device_xname(dev)));
   9880 			wm_set_linkdown_discard(sc);
   9881 		}
   9882 	}
   9883 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9884 		wm_gig_downshift_workaround_ich8lan(sc);
   9885 
   9886 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9887 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9888 
   9889 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9890 		device_xname(dev)));
   9891 	mii_pollstat(&sc->sc_mii);
   9892 	if (sc->sc_type == WM_T_82543) {
   9893 		int miistatus, active;
   9894 
   9895 		/*
   9896 		 * With 82543, we need to force speed and
   9897 		 * duplex on the MAC equal to what the PHY
   9898 		 * speed and duplex configuration is.
   9899 		 */
   9900 		miistatus = sc->sc_mii.mii_media_status;
   9901 
   9902 		if (miistatus & IFM_ACTIVE) {
   9903 			active = sc->sc_mii.mii_media_active;
   9904 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9905 			switch (IFM_SUBTYPE(active)) {
   9906 			case IFM_10_T:
   9907 				sc->sc_ctrl |= CTRL_SPEED_10;
   9908 				break;
   9909 			case IFM_100_TX:
   9910 				sc->sc_ctrl |= CTRL_SPEED_100;
   9911 				break;
   9912 			case IFM_1000_T:
   9913 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9914 				break;
   9915 			default:
   9916 				/*
   9917 				 * Fiber?
    9918 				 * Should not enter here.
   9919 				 */
   9920 				device_printf(dev, "unknown media (%x)\n",
   9921 				    active);
   9922 				break;
   9923 			}
   9924 			if (active & IFM_FDX)
   9925 				sc->sc_ctrl |= CTRL_FD;
   9926 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9927 		}
   9928 	} else if (sc->sc_type == WM_T_PCH) {
   9929 		wm_k1_gig_workaround_hv(sc,
   9930 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9931 	}
   9932 
   9933 	/*
   9934 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9935 	 * aggressive resulting in many collisions. To avoid this, increase
   9936 	 * the IPG and reduce Rx latency in the PHY.
   9937 	 */
   9938 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9939 	    && link) {
   9940 		uint32_t tipg_reg;
   9941 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9942 		bool fdx;
   9943 		uint16_t emi_addr, emi_val;
   9944 
   9945 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9946 		tipg_reg &= ~TIPG_IPGT_MASK;
   9947 		fdx = status & STATUS_FD;
   9948 
   9949 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9950 			tipg_reg |= 0xff;
   9951 			/* Reduce Rx latency in analog PHY */
   9952 			emi_val = 0;
   9953 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9954 		    fdx && speed != STATUS_SPEED_1000) {
   9955 			tipg_reg |= 0xc;
   9956 			emi_val = 1;
   9957 		} else {
    9958 			/* Roll back to the default values */
   9959 			tipg_reg |= 0x08;
   9960 			emi_val = 1;
   9961 		}
   9962 
   9963 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9964 
   9965 		rv = sc->phy.acquire(sc);
   9966 		if (rv)
   9967 			return;
   9968 
   9969 		if (sc->sc_type == WM_T_PCH2)
   9970 			emi_addr = I82579_RX_CONFIG;
   9971 		else
   9972 			emi_addr = I217_RX_CONFIG;
   9973 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9974 
   9975 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9976 			uint16_t phy_reg;
   9977 
   9978 			sc->phy.readreg_locked(dev, 2,
   9979 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9980 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9981 			if (speed == STATUS_SPEED_100
   9982 			    || speed == STATUS_SPEED_10)
   9983 				phy_reg |= 0x3e8;
   9984 			else
   9985 				phy_reg |= 0xfa;
   9986 			sc->phy.writereg_locked(dev, 2,
   9987 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9988 
   9989 			if (speed == STATUS_SPEED_1000) {
   9990 				sc->phy.readreg_locked(dev, 2,
   9991 				    HV_PM_CTRL, &phy_reg);
   9992 
   9993 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9994 
   9995 				sc->phy.writereg_locked(dev, 2,
   9996 				    HV_PM_CTRL, phy_reg);
   9997 			}
   9998 		}
   9999 		sc->phy.release(sc);
   10000 
   10001 		if (rv)
   10002 			return;
   10003 
   10004 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10005 			uint16_t data, ptr_gap;
   10006 
   10007 			if (speed == STATUS_SPEED_1000) {
   10008 				rv = sc->phy.acquire(sc);
   10009 				if (rv)
   10010 					return;
   10011 
   10012 				rv = sc->phy.readreg_locked(dev, 2,
   10013 				    I82579_UNKNOWN1, &data);
   10014 				if (rv) {
   10015 					sc->phy.release(sc);
   10016 					return;
   10017 				}
   10018 
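          				/*
          				 * Extract the pointer gap field from
          				 * bits 11:2 and raise it to at least
          				 * 0x18 if it is below that.
          				 */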
   10019 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10020 				if (ptr_gap < 0x18) {
   10021 					data &= ~(0x3ff << 2);
   10022 					data |= (0x18 << 2);
   10023 					rv = sc->phy.writereg_locked(dev,
   10024 					    2, I82579_UNKNOWN1, data);
   10025 				}
   10026 				sc->phy.release(sc);
   10027 				if (rv)
   10028 					return;
   10029 			} else {
   10030 				rv = sc->phy.acquire(sc);
   10031 				if (rv)
   10032 					return;
   10033 
   10034 				rv = sc->phy.writereg_locked(dev, 2,
   10035 				    I82579_UNKNOWN1, 0xc023);
   10036 				sc->phy.release(sc);
   10037 				if (rv)
   10038 					return;
   10039 
   10040 			}
   10041 		}
   10042 	}
   10043 
   10044 	/*
   10045 	 * I217 Packet Loss issue:
   10046 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10047 	 * on power up.
   10048 	 * Set the Beacon Duration for I217 to 8 usec
   10049 	 */
   10050 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10051 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10052 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10053 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10054 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10055 	}
   10056 
   10057 	/* Work-around I218 hang issue */
   10058 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10059 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10060 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10061 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10062 		wm_k1_workaround_lpt_lp(sc, link);
   10063 
   10064 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10065 		/*
   10066 		 * Set platform power management values for Latency
   10067 		 * Tolerance Reporting (LTR)
   10068 		 */
   10069 		wm_platform_pm_pch_lpt(sc,
   10070 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10071 	}
   10072 
   10073 	/* Clear link partner's EEE ability */
   10074 	sc->eee_lp_ability = 0;
   10075 
   10076 	/* FEXTNVM6 K1-off workaround */
   10077 	if (sc->sc_type == WM_T_PCH_SPT) {
   10078 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10079 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10080 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10081 		else
   10082 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10083 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10084 	}
   10085 
   10086 	if (!link)
   10087 		return;
   10088 
   10089 	switch (sc->sc_type) {
   10090 	case WM_T_PCH2:
   10091 		wm_k1_workaround_lv(sc);
   10092 		/* FALLTHROUGH */
   10093 	case WM_T_PCH:
   10094 		if (sc->sc_phytype == WMPHY_82578)
   10095 			wm_link_stall_workaround_hv(sc);
   10096 		break;
   10097 	default:
   10098 		break;
   10099 	}
   10100 
   10101 	/* Enable/Disable EEE after link up */
   10102 	if (sc->sc_phytype > WMPHY_82579)
   10103 		wm_set_eee_pchlan(sc);
   10104 }
   10105 
   10106 /*
   10107  * wm_linkintr_tbi:
   10108  *
   10109  *	Helper; handle link interrupts for TBI mode.
   10110  */
   10111 static void
   10112 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10113 {
   10114 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10115 	uint32_t status;
   10116 
   10117 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10118 		__func__));
   10119 
   10120 	status = CSR_READ(sc, WMREG_STATUS);
   10121 	if (icr & ICR_LSC) {
   10122 		wm_check_for_link(sc);
   10123 		if (status & STATUS_LU) {
   10124 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10125 				device_xname(sc->sc_dev),
   10126 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10127 			/*
   10128 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10129 			 * so we should update sc->sc_ctrl
   10130 			 */
   10131 
   10132 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10133 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10134 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10135 			if (status & STATUS_FD)
   10136 				sc->sc_tctl |=
   10137 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10138 			else
   10139 				sc->sc_tctl |=
   10140 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10141 			if (sc->sc_ctrl & CTRL_TFCE)
   10142 				sc->sc_fcrtl |= FCRTL_XONE;
   10143 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10144 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10145 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10146 			sc->sc_tbi_linkup = 1;
   10147 			if_link_state_change(ifp, LINK_STATE_UP);
   10148 		} else {
   10149 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10150 				device_xname(sc->sc_dev)));
   10151 			sc->sc_tbi_linkup = 0;
   10152 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10153 		}
   10154 		/* Update LED */
   10155 		wm_tbi_serdes_set_linkled(sc);
   10156 	} else if (icr & ICR_RXSEQ)
   10157 		DPRINTF(sc, WM_DEBUG_LINK,
   10158 		    ("%s: LINK: Receive sequence error\n",
   10159 			device_xname(sc->sc_dev)));
   10160 }
   10161 
   10162 /*
   10163  * wm_linkintr_serdes:
   10164  *
    10165  *	Helper; handle link interrupts for SERDES mode.
   10166  */
   10167 static void
   10168 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10169 {
   10170 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10171 	struct mii_data *mii = &sc->sc_mii;
   10172 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10173 	uint32_t pcs_adv, pcs_lpab, reg;
   10174 
   10175 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10176 		__func__));
   10177 
   10178 	if (icr & ICR_LSC) {
   10179 		/* Check PCS */
   10180 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10181 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10182 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10183 				device_xname(sc->sc_dev)));
   10184 			mii->mii_media_status |= IFM_ACTIVE;
   10185 			sc->sc_tbi_linkup = 1;
   10186 			if_link_state_change(ifp, LINK_STATE_UP);
   10187 		} else {
   10188 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10189 				device_xname(sc->sc_dev)));
   10190 			mii->mii_media_status |= IFM_NONE;
   10191 			sc->sc_tbi_linkup = 0;
   10192 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10193 			wm_tbi_serdes_set_linkled(sc);
   10194 			return;
   10195 		}
   10196 		mii->mii_media_active |= IFM_1000_SX;
   10197 		if ((reg & PCS_LSTS_FDX) != 0)
   10198 			mii->mii_media_active |= IFM_FDX;
   10199 		else
   10200 			mii->mii_media_active |= IFM_HDX;
   10201 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10202 			/* Check flow */
   10203 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10204 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10205 				DPRINTF(sc, WM_DEBUG_LINK,
   10206 				    ("XXX LINKOK but not ACOMP\n"));
   10207 				return;
   10208 			}
   10209 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10210 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10211 			DPRINTF(sc, WM_DEBUG_LINK,
   10212 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10213 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10214 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10215 				mii->mii_media_active |= IFM_FLOW
   10216 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10217 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10218 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10219 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10220 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10221 				mii->mii_media_active |= IFM_FLOW
   10222 				    | IFM_ETH_TXPAUSE;
   10223 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10224 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10225 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10226 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10227 				mii->mii_media_active |= IFM_FLOW
   10228 				    | IFM_ETH_RXPAUSE;
   10229 		}
   10230 		/* Update LED */
   10231 		wm_tbi_serdes_set_linkled(sc);
   10232 	} else
   10233 		DPRINTF(sc, WM_DEBUG_LINK,
   10234 		    ("%s: LINK: Receive sequence error\n",
   10235 		    device_xname(sc->sc_dev)));
   10236 }
   10237 
   10238 /*
   10239  * wm_linkintr:
   10240  *
   10241  *	Helper; handle link interrupts.
   10242  */
   10243 static void
   10244 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10245 {
   10246 
   10247 	KASSERT(WM_CORE_LOCKED(sc));
   10248 
   10249 	if (sc->sc_flags & WM_F_HAS_MII)
   10250 		wm_linkintr_gmii(sc, icr);
   10251 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10252 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10253 		wm_linkintr_serdes(sc, icr);
   10254 	else
   10255 		wm_linkintr_tbi(sc, icr);
   10256 }
   10257 
   10258 
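          /*
           * wm_sched_handle_queue:
           *
           *	Defer Tx/Rx processing for a queue to either the per-driver
           *	workqueue or a softint, depending on wmq_txrx_use_workqueue.
           */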
   10259 static inline void
   10260 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10261 {
   10262 
   10263 	if (wmq->wmq_txrx_use_workqueue)
   10264 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10265 	else
   10266 		softint_schedule(wmq->wmq_si);
   10267 }
   10268 
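          /* Mask or unmask all legacy (INTx/MSI) interrupts via IMC/IMS. */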
   10269 static inline void
   10270 wm_legacy_intr_disable(struct wm_softc *sc)
   10271 {
   10272 
   10273 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10274 }
   10275 
   10276 static inline void
   10277 wm_legacy_intr_enable(struct wm_softc *sc)
   10278 {
   10279 
   10280 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10281 }
   10282 
   10283 /*
   10284  * wm_intr_legacy:
   10285  *
   10286  *	Interrupt service routine for INTx and MSI.
   10287  */
   10288 static int
   10289 wm_intr_legacy(void *arg)
   10290 {
   10291 	struct wm_softc *sc = arg;
   10292 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10293 	struct wm_queue *wmq = &sc->sc_queue[0];
   10294 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10295 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10296 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10297 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10298 	uint32_t icr, rndval = 0;
   10299 	bool more = false;
   10300 
   10301 	icr = CSR_READ(sc, WMREG_ICR);
   10302 	if ((icr & sc->sc_icr) == 0)
   10303 		return 0;
   10304 
   10305 	DPRINTF(sc, WM_DEBUG_TX,
    10306 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10307 	if (rndval == 0)
   10308 		rndval = icr;
   10309 
   10310 	mutex_enter(txq->txq_lock);
   10311 
   10312 	if (txq->txq_stopping) {
   10313 		mutex_exit(txq->txq_lock);
   10314 		return 1;
   10315 	}
   10316 
   10317 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10318 	if (icr & ICR_TXDW) {
   10319 		DPRINTF(sc, WM_DEBUG_TX,
   10320 		    ("%s: TX: got TXDW interrupt\n",
   10321 			device_xname(sc->sc_dev)));
   10322 		WM_Q_EVCNT_INCR(txq, txdw);
   10323 	}
   10324 #endif
   10325 	if (txlimit > 0) {
   10326 		more |= wm_txeof(txq, txlimit);
   10327 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10328 			more = true;
   10329 	} else
   10330 		more = true;
   10331 	mutex_exit(txq->txq_lock);
   10332 
   10333 	mutex_enter(rxq->rxq_lock);
   10334 
   10335 	if (rxq->rxq_stopping) {
   10336 		mutex_exit(rxq->rxq_lock);
   10337 		return 1;
   10338 	}
   10339 
   10340 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10341 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10342 		DPRINTF(sc, WM_DEBUG_RX,
   10343 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10344 			device_xname(sc->sc_dev),
   10345 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10346 		WM_Q_EVCNT_INCR(rxq, intr);
   10347 	}
   10348 #endif
   10349 	if (rxlimit > 0) {
   10350 		/*
   10351 		 * wm_rxeof() does *not* call upper layer functions directly,
    10352 		 * as if_percpuq_enqueue() just calls softint_schedule().
    10353 		 * So, we can call wm_rxeof() in interrupt context.
    10354 		 */
    10355 		more |= wm_rxeof(rxq, rxlimit);
   10356 	} else
   10357 		more = true;
   10358 
   10359 	mutex_exit(rxq->rxq_lock);
   10360 
   10361 	WM_CORE_LOCK(sc);
   10362 
   10363 	if (sc->sc_core_stopping) {
   10364 		WM_CORE_UNLOCK(sc);
   10365 		return 1;
   10366 	}
   10367 
   10368 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10369 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10370 		wm_linkintr(sc, icr);
   10371 	}
   10372 	if ((icr & ICR_GPI(0)) != 0)
   10373 		device_printf(sc->sc_dev, "got module interrupt\n");
   10374 
   10375 	WM_CORE_UNLOCK(sc);
   10376 
   10377 	if (icr & ICR_RXO) {
   10378 #if defined(WM_DEBUG)
   10379 		log(LOG_WARNING, "%s: Receive overrun\n",
   10380 		    device_xname(sc->sc_dev));
   10381 #endif /* defined(WM_DEBUG) */
   10382 	}
   10383 
   10384 	rnd_add_uint32(&sc->rnd_source, rndval);
   10385 
   10386 	if (more) {
   10387 		/* Try to get more packets going. */
   10388 		wm_legacy_intr_disable(sc);
   10389 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10390 		wm_sched_handle_queue(sc, wmq);
   10391 	}
   10392 
   10393 	return 1;
   10394 }
   10395 
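          /*
           * wm_txrxintr_disable:
           *
           *	Mask the Tx/Rx interrupts of the given queue. The register
           *	to use depends on the chip: IMC on the 82574, per-queue EIMC
           *	bits on the 82575, and the MSI-X vector bit in EIMC on newer
           *	chips.
           */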
   10396 static inline void
   10397 wm_txrxintr_disable(struct wm_queue *wmq)
   10398 {
   10399 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10400 
   10401 	if (__predict_false(!wm_is_using_msix(sc))) {
   10402 		wm_legacy_intr_disable(sc);
   10403 		return;
   10404 	}
   10405 
   10406 	if (sc->sc_type == WM_T_82574)
   10407 		CSR_WRITE(sc, WMREG_IMC,
   10408 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10409 	else if (sc->sc_type == WM_T_82575)
   10410 		CSR_WRITE(sc, WMREG_EIMC,
   10411 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10412 	else
   10413 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10414 }
   10415 
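          /*
           * wm_txrxintr_enable:
           *
           *	Re-enable the Tx/Rx interrupts of the given queue after
           *	updating the interrupt throttling rate.
           */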
   10416 static inline void
   10417 wm_txrxintr_enable(struct wm_queue *wmq)
   10418 {
   10419 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10420 
   10421 	wm_itrs_calculate(sc, wmq);
   10422 
   10423 	if (__predict_false(!wm_is_using_msix(sc))) {
   10424 		wm_legacy_intr_enable(sc);
   10425 		return;
   10426 	}
   10427 
   10428 	/*
    10429 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10430 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    10431 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    10432 	 * disabled while each wm_handle_queue(wmq) is running.
   10433 	 */
   10434 	if (sc->sc_type == WM_T_82574)
   10435 		CSR_WRITE(sc, WMREG_IMS,
   10436 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10437 	else if (sc->sc_type == WM_T_82575)
   10438 		CSR_WRITE(sc, WMREG_EIMS,
   10439 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10440 	else
   10441 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10442 }
   10443 
   10444 static int
   10445 wm_txrxintr_msix(void *arg)
   10446 {
   10447 	struct wm_queue *wmq = arg;
   10448 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10449 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10450 	struct wm_softc *sc = txq->txq_sc;
   10451 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10452 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10453 	bool txmore;
   10454 	bool rxmore;
   10455 
   10456 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10457 
   10458 	DPRINTF(sc, WM_DEBUG_TX,
   10459 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10460 
   10461 	wm_txrxintr_disable(wmq);
   10462 
   10463 	mutex_enter(txq->txq_lock);
   10464 
   10465 	if (txq->txq_stopping) {
   10466 		mutex_exit(txq->txq_lock);
   10467 		return 1;
   10468 	}
   10469 
   10470 	WM_Q_EVCNT_INCR(txq, txdw);
   10471 	if (txlimit > 0) {
   10472 		txmore = wm_txeof(txq, txlimit);
    10473 		/* wm_deferred_start_locked() is called in wm_handle_queue(). */
   10474 	} else
   10475 		txmore = true;
   10476 	mutex_exit(txq->txq_lock);
   10477 
   10478 	DPRINTF(sc, WM_DEBUG_RX,
   10479 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10480 	mutex_enter(rxq->rxq_lock);
   10481 
   10482 	if (rxq->rxq_stopping) {
   10483 		mutex_exit(rxq->rxq_lock);
   10484 		return 1;
   10485 	}
   10486 
   10487 	WM_Q_EVCNT_INCR(rxq, intr);
   10488 	if (rxlimit > 0) {
   10489 		rxmore = wm_rxeof(rxq, rxlimit);
   10490 	} else
   10491 		rxmore = true;
   10492 	mutex_exit(rxq->rxq_lock);
   10493 
   10494 	wm_itrs_writereg(sc, wmq);
   10495 
   10496 	if (txmore || rxmore) {
   10497 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10498 		wm_sched_handle_queue(sc, wmq);
   10499 	} else
   10500 		wm_txrxintr_enable(wmq);
   10501 
   10502 	return 1;
   10503 }
   10504 
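          /*
           * wm_handle_queue:
           *
           *	Softint/workqueue handler; continue the Tx/Rx processing
           *	started in the interrupt handler, this time using the
           *	separate sc_{tx,rx}_process_limit limits, and reschedule
           *	itself if there is still more work to do.
           */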
   10505 static void
   10506 wm_handle_queue(void *arg)
   10507 {
   10508 	struct wm_queue *wmq = arg;
   10509 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10510 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10511 	struct wm_softc *sc = txq->txq_sc;
   10512 	u_int txlimit = sc->sc_tx_process_limit;
   10513 	u_int rxlimit = sc->sc_rx_process_limit;
   10514 	bool txmore;
   10515 	bool rxmore;
   10516 
   10517 	mutex_enter(txq->txq_lock);
   10518 	if (txq->txq_stopping) {
   10519 		mutex_exit(txq->txq_lock);
   10520 		return;
   10521 	}
   10522 	txmore = wm_txeof(txq, txlimit);
   10523 	wm_deferred_start_locked(txq);
   10524 	mutex_exit(txq->txq_lock);
   10525 
   10526 	mutex_enter(rxq->rxq_lock);
   10527 	if (rxq->rxq_stopping) {
   10528 		mutex_exit(rxq->rxq_lock);
   10529 		return;
   10530 	}
   10531 	WM_Q_EVCNT_INCR(rxq, defer);
   10532 	rxmore = wm_rxeof(rxq, rxlimit);
   10533 	mutex_exit(rxq->rxq_lock);
   10534 
   10535 	if (txmore || rxmore) {
   10536 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10537 		wm_sched_handle_queue(sc, wmq);
   10538 	} else
   10539 		wm_txrxintr_enable(wmq);
   10540 }
   10541 
   10542 static void
   10543 wm_handle_queue_work(struct work *wk, void *context)
   10544 {
   10545 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10546 
    10547 	/* The "enqueued flag" is not required here. */
   10550 	wm_handle_queue(wmq);
   10551 }
   10552 
   10553 /*
   10554  * wm_linkintr_msix:
   10555  *
   10556  *	Interrupt service routine for link status change for MSI-X.
   10557  */
   10558 static int
   10559 wm_linkintr_msix(void *arg)
   10560 {
   10561 	struct wm_softc *sc = arg;
   10562 	uint32_t reg;
    10563 	bool has_rxo = false;
   10564 
   10565 	reg = CSR_READ(sc, WMREG_ICR);
   10566 	WM_CORE_LOCK(sc);
   10567 	DPRINTF(sc, WM_DEBUG_LINK,
   10568 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10569 		device_xname(sc->sc_dev), reg));
   10570 
   10571 	if (sc->sc_core_stopping)
   10572 		goto out;
   10573 
   10574 	if ((reg & ICR_LSC) != 0) {
   10575 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10576 		wm_linkintr(sc, ICR_LSC);
   10577 	}
   10578 	if ((reg & ICR_GPI(0)) != 0)
   10579 		device_printf(sc->sc_dev, "got module interrupt\n");
   10580 
   10581 	/*
   10582 	 * XXX 82574 MSI-X mode workaround
   10583 	 *
   10584 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
    10585 	 * 82574 MSI-X mode raises the receive overrun (RXO) interrupt on
    10586 	 * the ICR_OTHER MSI-X vector; furthermore, it raises neither the
    10587 	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector. So we generate ICR_RXQ(0)
    10588 	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS to process Rx packets.
   10589 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10590 #if defined(WM_DEBUG)
   10591 		log(LOG_WARNING, "%s: Receive overrun\n",
   10592 		    device_xname(sc->sc_dev));
   10593 #endif /* defined(WM_DEBUG) */
   10594 
   10595 		has_rxo = true;
   10596 		/*
    10597 		 * The RXO interrupt fires at a very high rate when receive
    10598 		 * traffic is heavy. We use polling mode for ICR_OTHER, as
    10599 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
    10600 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    10601 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10602 		 */
   10603 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10604 
   10605 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10606 	}
   10607 
   10610 out:
   10611 	WM_CORE_UNLOCK(sc);
   10612 
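          	/*
          	 * Re-enable the link interrupt and, unless the 82574 RXO
          	 * workaround was triggered above, the OTHER interrupt too.
          	 */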
   10613 	if (sc->sc_type == WM_T_82574) {
   10614 		if (!has_rxo)
   10615 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10616 		else
   10617 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10618 	} else if (sc->sc_type == WM_T_82575)
   10619 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10620 	else
   10621 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10622 
   10623 	return 1;
   10624 }
   10625 
   10626 /*
   10627  * Media related.
   10628  * GMII, SGMII, TBI (and SERDES)
   10629  */
   10630 
   10631 /* Common */
   10632 
   10633 /*
   10634  * wm_tbi_serdes_set_linkled:
   10635  *
   10636  *	Update the link LED on TBI and SERDES devices.
   10637  */
   10638 static void
   10639 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10640 {
   10641 
   10642 	if (sc->sc_tbi_linkup)
   10643 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10644 	else
   10645 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10646 
   10647 	/* 82540 or newer devices are active low */
   10648 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10649 
   10650 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10651 }
   10652 
   10653 /* GMII related */
   10654 
   10655 /*
   10656  * wm_gmii_reset:
   10657  *
   10658  *	Reset the PHY.
   10659  */
   10660 static void
   10661 wm_gmii_reset(struct wm_softc *sc)
   10662 {
   10663 	uint32_t reg;
   10664 	int rv;
   10665 
   10666 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10667 		device_xname(sc->sc_dev), __func__));
   10668 
   10669 	rv = sc->phy.acquire(sc);
   10670 	if (rv != 0) {
   10671 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10672 		    __func__);
   10673 		return;
   10674 	}
   10675 
   10676 	switch (sc->sc_type) {
   10677 	case WM_T_82542_2_0:
   10678 	case WM_T_82542_2_1:
   10679 		/* null */
   10680 		break;
   10681 	case WM_T_82543:
   10682 		/*
   10683 		 * With 82543, we need to force speed and duplex on the MAC
   10684 		 * equal to what the PHY speed and duplex configuration is.
   10685 		 * In addition, we need to perform a hardware reset on the PHY
   10686 		 * to take it out of reset.
   10687 		 */
   10688 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10689 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10690 
   10691 		/* The PHY reset pin is active-low. */
   10692 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10693 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10694 		    CTRL_EXT_SWDPIN(4));
   10695 		reg |= CTRL_EXT_SWDPIO(4);
   10696 
   10697 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10698 		CSR_WRITE_FLUSH(sc);
   10699 		delay(10*1000);
   10700 
   10701 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10702 		CSR_WRITE_FLUSH(sc);
   10703 		delay(150);
   10704 #if 0
   10705 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10706 #endif
   10707 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10708 		break;
   10709 	case WM_T_82544:	/* Reset 10000us */
   10710 	case WM_T_82540:
   10711 	case WM_T_82545:
   10712 	case WM_T_82545_3:
   10713 	case WM_T_82546:
   10714 	case WM_T_82546_3:
   10715 	case WM_T_82541:
   10716 	case WM_T_82541_2:
   10717 	case WM_T_82547:
   10718 	case WM_T_82547_2:
   10719 	case WM_T_82571:	/* Reset 100us */
   10720 	case WM_T_82572:
   10721 	case WM_T_82573:
   10722 	case WM_T_82574:
   10723 	case WM_T_82575:
   10724 	case WM_T_82576:
   10725 	case WM_T_82580:
   10726 	case WM_T_I350:
   10727 	case WM_T_I354:
   10728 	case WM_T_I210:
   10729 	case WM_T_I211:
   10730 	case WM_T_82583:
   10731 	case WM_T_80003:
   10732 		/* Generic reset */
   10733 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10734 		CSR_WRITE_FLUSH(sc);
   10735 		delay(20000);
   10736 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10737 		CSR_WRITE_FLUSH(sc);
   10738 		delay(20000);
   10739 
   10740 		if ((sc->sc_type == WM_T_82541)
   10741 		    || (sc->sc_type == WM_T_82541_2)
   10742 		    || (sc->sc_type == WM_T_82547)
   10743 		    || (sc->sc_type == WM_T_82547_2)) {
    10744 			/* Workarounds for IGP are done in igp_reset() */
   10745 			/* XXX add code to set LED after phy reset */
   10746 		}
   10747 		break;
   10748 	case WM_T_ICH8:
   10749 	case WM_T_ICH9:
   10750 	case WM_T_ICH10:
   10751 	case WM_T_PCH:
   10752 	case WM_T_PCH2:
   10753 	case WM_T_PCH_LPT:
   10754 	case WM_T_PCH_SPT:
   10755 	case WM_T_PCH_CNP:
   10756 		/* Generic reset */
   10757 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10758 		CSR_WRITE_FLUSH(sc);
   10759 		delay(100);
   10760 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10761 		CSR_WRITE_FLUSH(sc);
   10762 		delay(150);
   10763 		break;
   10764 	default:
   10765 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10766 		    __func__);
   10767 		break;
   10768 	}
   10769 
   10770 	sc->phy.release(sc);
   10771 
   10772 	/* get_cfg_done */
   10773 	wm_get_cfg_done(sc);
   10774 
   10775 	/* Extra setup */
   10776 	switch (sc->sc_type) {
   10777 	case WM_T_82542_2_0:
   10778 	case WM_T_82542_2_1:
   10779 	case WM_T_82543:
   10780 	case WM_T_82544:
   10781 	case WM_T_82540:
   10782 	case WM_T_82545:
   10783 	case WM_T_82545_3:
   10784 	case WM_T_82546:
   10785 	case WM_T_82546_3:
   10786 	case WM_T_82541_2:
   10787 	case WM_T_82547_2:
   10788 	case WM_T_82571:
   10789 	case WM_T_82572:
   10790 	case WM_T_82573:
   10791 	case WM_T_82574:
   10792 	case WM_T_82583:
   10793 	case WM_T_82575:
   10794 	case WM_T_82576:
   10795 	case WM_T_82580:
   10796 	case WM_T_I350:
   10797 	case WM_T_I354:
   10798 	case WM_T_I210:
   10799 	case WM_T_I211:
   10800 	case WM_T_80003:
   10801 		/* Null */
   10802 		break;
   10803 	case WM_T_82541:
   10804 	case WM_T_82547:
    10805 		/* XXX Actively configure the LED after PHY reset */
   10806 		break;
   10807 	case WM_T_ICH8:
   10808 	case WM_T_ICH9:
   10809 	case WM_T_ICH10:
   10810 	case WM_T_PCH:
   10811 	case WM_T_PCH2:
   10812 	case WM_T_PCH_LPT:
   10813 	case WM_T_PCH_SPT:
   10814 	case WM_T_PCH_CNP:
   10815 		wm_phy_post_reset(sc);
   10816 		break;
   10817 	default:
   10818 		panic("%s: unknown type\n", __func__);
   10819 		break;
   10820 	}
   10821 }
   10822 
   10823 /*
   10824  * Set up sc_phytype and mii_{read|write}reg.
   10825  *
    10826  *  To identify the PHY type, the correct read/write functions must
    10827  * be selected. To select the correct functions, the PCI ID or MAC
    10828  * type is required, without accessing PHY registers.
    10829  *
    10830  *  On the first call of this function, the PHY ID is not known yet,
    10831  * so check the PCI ID or MAC type. The list of PCI IDs may not be
    10832  * complete, so the result might be incorrect.
    10833  *
    10834  *  On the second call, the PHY OUI and model are used to identify
    10835  * the PHY type. This might still not be perfect because of missing
    10836  * table entries, but it should be better than the first call.
    10837  *
    10838  *  If the newly detected result differs from the previous
    10839  * assumption, a diagnostic message is printed.
   10840  */
   10841 static void
   10842 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10843     uint16_t phy_model)
   10844 {
   10845 	device_t dev = sc->sc_dev;
   10846 	struct mii_data *mii = &sc->sc_mii;
   10847 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10848 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10849 	mii_readreg_t new_readreg;
   10850 	mii_writereg_t new_writereg;
   10851 	bool dodiag = true;
   10852 
   10853 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10854 		device_xname(sc->sc_dev), __func__));
   10855 
   10856 	/*
    10857 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
    10858 	 * incorrect, so don't print diagnostic output on the second call.
   10859 	 */
   10860 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10861 		dodiag = false;
   10862 
   10863 	if (mii->mii_readreg == NULL) {
   10864 		/*
   10865 		 *  This is the first call of this function. For ICH and PCH
   10866 		 * variants, it's difficult to determine the PHY access method
   10867 		 * by sc_type, so use the PCI product ID for some devices.
   10868 		 */
   10869 
   10870 		switch (sc->sc_pcidevid) {
   10871 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10872 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10873 			/* 82577 */
   10874 			new_phytype = WMPHY_82577;
   10875 			break;
   10876 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10877 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10878 			/* 82578 */
   10879 			new_phytype = WMPHY_82578;
   10880 			break;
   10881 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10882 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10883 			/* 82579 */
   10884 			new_phytype = WMPHY_82579;
   10885 			break;
   10886 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10887 		case PCI_PRODUCT_INTEL_82801I_BM:
   10888 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10889 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10890 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10891 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10892 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10893 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10894 			/* ICH8, 9, 10 with 82567 */
   10895 			new_phytype = WMPHY_BM;
   10896 			break;
   10897 		default:
   10898 			break;
   10899 		}
   10900 	} else {
   10901 		/* It's not the first call. Use PHY OUI and model */
   10902 		switch (phy_oui) {
   10903 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10904 			switch (phy_model) {
   10905 			case MII_MODEL_ATTANSIC_AR8021:
   10906 				new_phytype = WMPHY_82578;
   10907 				break;
   10908 			default:
   10909 				break;
   10910 			}
   10911 			break;
   10912 		case MII_OUI_xxMARVELL:
   10913 			switch (phy_model) {
   10914 			case MII_MODEL_xxMARVELL_I210:
   10915 				new_phytype = WMPHY_I210;
   10916 				break;
   10917 			case MII_MODEL_xxMARVELL_E1011:
   10918 			case MII_MODEL_xxMARVELL_E1000_3:
   10919 			case MII_MODEL_xxMARVELL_E1000_5:
   10920 			case MII_MODEL_xxMARVELL_E1112:
   10921 				new_phytype = WMPHY_M88;
   10922 				break;
   10923 			case MII_MODEL_xxMARVELL_E1149:
   10924 				new_phytype = WMPHY_BM;
   10925 				break;
   10926 			case MII_MODEL_xxMARVELL_E1111:
   10927 			case MII_MODEL_xxMARVELL_I347:
   10928 			case MII_MODEL_xxMARVELL_E1512:
   10929 			case MII_MODEL_xxMARVELL_E1340M:
   10930 			case MII_MODEL_xxMARVELL_E1543:
   10931 				new_phytype = WMPHY_M88;
   10932 				break;
   10933 			case MII_MODEL_xxMARVELL_I82563:
   10934 				new_phytype = WMPHY_GG82563;
   10935 				break;
   10936 			default:
   10937 				break;
   10938 			}
   10939 			break;
   10940 		case MII_OUI_INTEL:
   10941 			switch (phy_model) {
   10942 			case MII_MODEL_INTEL_I82577:
   10943 				new_phytype = WMPHY_82577;
   10944 				break;
   10945 			case MII_MODEL_INTEL_I82579:
   10946 				new_phytype = WMPHY_82579;
   10947 				break;
   10948 			case MII_MODEL_INTEL_I217:
   10949 				new_phytype = WMPHY_I217;
   10950 				break;
   10951 			case MII_MODEL_INTEL_I82580:
   10952 				new_phytype = WMPHY_82580;
   10953 				break;
   10954 			case MII_MODEL_INTEL_I350:
   10955 				new_phytype = WMPHY_I350;
   10956 				break;
   10957 			default:
   10958 				break;
   10959 			}
   10960 			break;
   10961 		case MII_OUI_yyINTEL:
   10962 			switch (phy_model) {
   10963 			case MII_MODEL_yyINTEL_I82562G:
   10964 			case MII_MODEL_yyINTEL_I82562EM:
   10965 			case MII_MODEL_yyINTEL_I82562ET:
   10966 				new_phytype = WMPHY_IFE;
   10967 				break;
   10968 			case MII_MODEL_yyINTEL_IGP01E1000:
   10969 				new_phytype = WMPHY_IGP;
   10970 				break;
   10971 			case MII_MODEL_yyINTEL_I82566:
   10972 				new_phytype = WMPHY_IGP_3;
   10973 				break;
   10974 			default:
   10975 				break;
   10976 			}
   10977 			break;
   10978 		default:
   10979 			break;
   10980 		}
   10981 
   10982 		if (dodiag) {
   10983 			if (new_phytype == WMPHY_UNKNOWN)
   10984 				aprint_verbose_dev(dev,
   10985 				    "%s: Unknown PHY model. OUI=%06x, "
   10986 				    "model=%04x\n", __func__, phy_oui,
   10987 				    phy_model);
   10988 
   10989 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10990 			    && (sc->sc_phytype != new_phytype)) {
   10991 				aprint_error_dev(dev, "Previously assumed PHY "
   10992 				    "type(%u) was incorrect. PHY type from PHY"
   10993 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10994 			}
   10995 		}
   10996 	}
   10997 
   10998 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10999 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11000 		/* SGMII */
   11001 		new_readreg = wm_sgmii_readreg;
   11002 		new_writereg = wm_sgmii_writereg;
   11003 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11004 		/* BM2 (phyaddr == 1) */
   11005 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11006 		    && (new_phytype != WMPHY_BM)
   11007 		    && (new_phytype != WMPHY_UNKNOWN))
   11008 			doubt_phytype = new_phytype;
   11009 		new_phytype = WMPHY_BM;
   11010 		new_readreg = wm_gmii_bm_readreg;
   11011 		new_writereg = wm_gmii_bm_writereg;
   11012 	} else if (sc->sc_type >= WM_T_PCH) {
   11013 		/* All PCH* use _hv_ */
   11014 		new_readreg = wm_gmii_hv_readreg;
   11015 		new_writereg = wm_gmii_hv_writereg;
   11016 	} else if (sc->sc_type >= WM_T_ICH8) {
   11017 		/* non-82567 ICH8, 9 and 10 */
   11018 		new_readreg = wm_gmii_i82544_readreg;
   11019 		new_writereg = wm_gmii_i82544_writereg;
   11020 	} else if (sc->sc_type >= WM_T_80003) {
   11021 		/* 80003 */
   11022 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11023 		    && (new_phytype != WMPHY_GG82563)
   11024 		    && (new_phytype != WMPHY_UNKNOWN))
   11025 			doubt_phytype = new_phytype;
   11026 		new_phytype = WMPHY_GG82563;
   11027 		new_readreg = wm_gmii_i80003_readreg;
   11028 		new_writereg = wm_gmii_i80003_writereg;
   11029 	} else if (sc->sc_type >= WM_T_I210) {
   11030 		/* I210 and I211 */
   11031 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11032 		    && (new_phytype != WMPHY_I210)
   11033 		    && (new_phytype != WMPHY_UNKNOWN))
   11034 			doubt_phytype = new_phytype;
   11035 		new_phytype = WMPHY_I210;
   11036 		new_readreg = wm_gmii_gs40g_readreg;
   11037 		new_writereg = wm_gmii_gs40g_writereg;
   11038 	} else if (sc->sc_type >= WM_T_82580) {
   11039 		/* 82580, I350 and I354 */
   11040 		new_readreg = wm_gmii_82580_readreg;
   11041 		new_writereg = wm_gmii_82580_writereg;
   11042 	} else if (sc->sc_type >= WM_T_82544) {
   11043 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11044 		new_readreg = wm_gmii_i82544_readreg;
   11045 		new_writereg = wm_gmii_i82544_writereg;
   11046 	} else {
   11047 		new_readreg = wm_gmii_i82543_readreg;
   11048 		new_writereg = wm_gmii_i82543_writereg;
   11049 	}
   11050 
   11051 	if (new_phytype == WMPHY_BM) {
   11052 		/* All BM use _bm_ */
   11053 		new_readreg = wm_gmii_bm_readreg;
   11054 		new_writereg = wm_gmii_bm_writereg;
   11055 	}
   11056 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11057 		/* All PCH* use _hv_ */
   11058 		new_readreg = wm_gmii_hv_readreg;
   11059 		new_writereg = wm_gmii_hv_writereg;
   11060 	}
   11061 
   11062 	/* Diag output */
   11063 	if (dodiag) {
   11064 		if (doubt_phytype != WMPHY_UNKNOWN)
   11065 			aprint_error_dev(dev, "Assumed new PHY type was "
   11066 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11067 			    new_phytype);
   11068 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11069 		    && (sc->sc_phytype != new_phytype))
   11070 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   11071 			    "was incorrect. New PHY type = %u\n",
   11072 			    sc->sc_phytype, new_phytype);
   11073 
   11074 		if ((mii->mii_readreg != NULL) &&
   11075 		    (new_phytype == WMPHY_UNKNOWN))
   11076 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11077 
   11078 		if ((mii->mii_readreg != NULL) &&
   11079 		    (mii->mii_readreg != new_readreg))
   11080 			aprint_error_dev(dev, "Previously assumed PHY "
   11081 			    "read/write function was incorrect.\n");
   11082 	}
   11083 
   11084 	/* Update now */
   11085 	sc->sc_phytype = new_phytype;
   11086 	mii->mii_readreg = new_readreg;
   11087 	mii->mii_writereg = new_writereg;
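	/*
	 * For access methods that also have *_locked variants, hook those
	 * up as well, so that callers already holding the PHY semaphore
	 * can access registers without re-acquiring it.
	 */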
   11088 	if (new_readreg == wm_gmii_hv_readreg) {
   11089 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11090 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11091 	} else if (new_readreg == wm_sgmii_readreg) {
   11092 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11093 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11094 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11095 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11096 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11097 	}
   11098 }
   11099 
   11100 /*
   11101  * wm_get_phy_id_82575:
   11102  *
   11103  * Return the PHY ID, or -1 on failure.
   11104  */
   11105 static int
   11106 wm_get_phy_id_82575(struct wm_softc *sc)
   11107 {
   11108 	uint32_t reg;
   11109 	int phyid = -1;
   11110 
   11111 	/* XXX */
   11112 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11113 		return -1;
   11114 
   11115 	if (wm_sgmii_uses_mdio(sc)) {
   11116 		switch (sc->sc_type) {
   11117 		case WM_T_82575:
   11118 		case WM_T_82576:
   11119 			reg = CSR_READ(sc, WMREG_MDIC);
   11120 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11121 			break;
   11122 		case WM_T_82580:
   11123 		case WM_T_I350:
   11124 		case WM_T_I354:
   11125 		case WM_T_I210:
   11126 		case WM_T_I211:
   11127 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11128 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11129 			break;
   11130 		default:
   11131 			return -1;
   11132 		}
   11133 	}
   11134 
   11135 	return phyid;
   11136 }
   11137 
   11138 /*
   11139  * wm_gmii_mediainit:
   11140  *
   11141  *	Initialize media for use on 1000BASE-T devices.
   11142  */
   11143 static void
   11144 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11145 {
   11146 	device_t dev = sc->sc_dev;
   11147 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11148 	struct mii_data *mii = &sc->sc_mii;
   11149 
   11150 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11151 		device_xname(sc->sc_dev), __func__));
   11152 
   11153 	/* We have GMII. */
   11154 	sc->sc_flags |= WM_F_HAS_MII;
   11155 
   11156 	if (sc->sc_type == WM_T_80003)
   11157 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11158 	else
   11159 		sc->sc_tipg = TIPG_1000T_DFLT;
   11160 
   11161 	/*
   11162 	 * Let the chip set speed/duplex on its own based on
   11163 	 * signals from the PHY.
   11164 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11165 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11166 	 */
   11167 	sc->sc_ctrl |= CTRL_SLU;
   11168 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11169 
   11170 	/* Initialize our media structures and probe the GMII. */
   11171 	mii->mii_ifp = ifp;
   11172 
   11173 	mii->mii_statchg = wm_gmii_statchg;
   11174 
   11175 	/* Move PHY control from SMBus to PCIe */
   11176 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11177 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11178 	    || (sc->sc_type == WM_T_PCH_CNP))
   11179 		wm_init_phy_workarounds_pchlan(sc);
   11180 
   11181 	wm_gmii_reset(sc);
   11182 
   11183 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11184 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11185 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11186 
   11187 	/* Setup internal SGMII PHY for SFP */
   11188 	wm_sgmii_sfp_preconfig(sc);
   11189 
   11190 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11191 	    || (sc->sc_type == WM_T_82580)
   11192 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11193 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11194 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11195 			/* Attach only one port */
   11196 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11197 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11198 		} else {
   11199 			int i, id;
   11200 			uint32_t ctrl_ext;
   11201 
   11202 			id = wm_get_phy_id_82575(sc);
   11203 			if (id != -1) {
   11204 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11205 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11206 			}
   11207 			if ((id == -1)
   11208 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11209 				/* Power on the SGMII PHY if it is disabled */
   11210 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11211 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11212 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11213 				CSR_WRITE_FLUSH(sc);
   11214 				delay(300*1000); /* XXX too long */
   11215 
   11216 				/*
   11217 				 * Scan PHY addresses from 1 to 7.
   11218 				 *
   11219 				 * I2C access can fail with the I2C register's
   11220 				 * ERROR bit set, so suppress error messages
   11221 				 * while scanning.
   11222 				 */
   11223 				sc->phy.no_errprint = true;
   11224 				for (i = 1; i < 8; i++)
   11225 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11226 					    0xffffffff, i, MII_OFFSET_ANY,
   11227 					    MIIF_DOPAUSE);
   11228 				sc->phy.no_errprint = false;
   11229 
   11230 				/* Restore the previous SFP cage power state */
   11231 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11232 			}
   11233 		}
   11234 	} else
   11235 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11236 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11237 
   11238 	/*
   11239 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11240 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11241 	 */
   11242 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11243 		|| (sc->sc_type == WM_T_PCH_SPT)
   11244 		|| (sc->sc_type == WM_T_PCH_CNP))
   11245 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11246 		wm_set_mdio_slow_mode_hv(sc);
   11247 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11248 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11249 	}
   11250 
   11251 	/*
   11252 	 * (For ICH8 variants)
   11253 	 * If PHY detection failed, use BM's r/w function and retry.
   11254 	 */
   11255 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11256 		/* if failed, retry with *_bm_* */
   11257 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11258 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11259 		    sc->sc_phytype);
   11260 		sc->sc_phytype = WMPHY_BM;
   11261 		mii->mii_readreg = wm_gmii_bm_readreg;
   11262 		mii->mii_writereg = wm_gmii_bm_writereg;
   11263 
   11264 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11265 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11266 	}
   11267 
   11268 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11269 		/* No PHY was found */
   11270 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11271 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11272 		sc->sc_phytype = WMPHY_NONE;
   11273 	} else {
   11274 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11275 
   11276 		/*
   11277 		 * PHY found! Check the PHY type again with the second call
   11278 		 * of wm_gmii_setup_phytype().
   11279 		 */
   11280 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11281 		    child->mii_mpd_model);
   11282 
   11283 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11284 	}
   11285 }
   11286 
   11287 /*
   11288  * wm_gmii_mediachange:	[ifmedia interface function]
   11289  *
   11290  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11291  */
   11292 static int
   11293 wm_gmii_mediachange(struct ifnet *ifp)
   11294 {
   11295 	struct wm_softc *sc = ifp->if_softc;
   11296 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11297 	uint32_t reg;
   11298 	int rc;
   11299 
   11300 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11301 		device_xname(sc->sc_dev), __func__));
   11302 	if ((ifp->if_flags & IFF_UP) == 0)
   11303 		return 0;
   11304 
   11305 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11306 	if ((sc->sc_type == WM_T_82580)
   11307 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11308 	    || (sc->sc_type == WM_T_I211)) {
   11309 		reg = CSR_READ(sc, WMREG_PHPM);
   11310 		reg &= ~PHPM_GO_LINK_D;
   11311 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11312 	}
   11313 
   11314 	/* Disable D0 LPLU. */
   11315 	wm_lplu_d0_disable(sc);
   11316 
   11317 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11318 	sc->sc_ctrl |= CTRL_SLU;
   11319 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11320 	    || (sc->sc_type > WM_T_82543)) {
   11321 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11322 	} else {
   11323 		sc->sc_ctrl &= ~CTRL_ASDE;
   11324 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11325 		if (ife->ifm_media & IFM_FDX)
   11326 			sc->sc_ctrl |= CTRL_FD;
   11327 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11328 		case IFM_10_T:
   11329 			sc->sc_ctrl |= CTRL_SPEED_10;
   11330 			break;
   11331 		case IFM_100_TX:
   11332 			sc->sc_ctrl |= CTRL_SPEED_100;
   11333 			break;
   11334 		case IFM_1000_T:
   11335 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11336 			break;
   11337 		case IFM_NONE:
   11338 			/* There is no specific setting for IFM_NONE */
   11339 			break;
   11340 		default:
   11341 			panic("wm_gmii_mediachange: bad media 0x%x",
   11342 			    ife->ifm_media);
   11343 		}
   11344 	}
   11345 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11346 	CSR_WRITE_FLUSH(sc);
   11347 
   11348 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11349 		wm_serdes_mediachange(ifp);
   11350 
   11351 	if (sc->sc_type <= WM_T_82543)
   11352 		wm_gmii_reset(sc);
   11353 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11354 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   11355 		/* Allow time for the SFP cage to power up the PHY */
   11356 		delay(300 * 1000);
   11357 		wm_gmii_reset(sc);
   11358 	}
   11359 
   11360 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11361 		return 0;
   11362 	return rc;
   11363 }
   11364 
   11365 /*
   11366  * wm_gmii_mediastatus:	[ifmedia interface function]
   11367  *
   11368  *	Get the current interface media status on a 1000BASE-T device.
   11369  */
   11370 static void
   11371 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11372 {
   11373 	struct wm_softc *sc = ifp->if_softc;
   11374 
   11375 	ether_mediastatus(ifp, ifmr);
   11376 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11377 	    | sc->sc_flowflags;
   11378 }
   11379 
   11380 #define	MDI_IO		CTRL_SWDPIN(2)
   11381 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11382 #define	MDI_CLK		CTRL_SWDPIN(3)
   11383 
   11384 static void
   11385 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11386 {
   11387 	uint32_t i, v;
   11388 
   11389 	v = CSR_READ(sc, WMREG_CTRL);
   11390 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11391 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11392 
   11393 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11394 		if (data & i)
   11395 			v |= MDI_IO;
   11396 		else
   11397 			v &= ~MDI_IO;
   11398 		CSR_WRITE(sc, WMREG_CTRL, v);
   11399 		CSR_WRITE_FLUSH(sc);
   11400 		delay(10);
   11401 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11402 		CSR_WRITE_FLUSH(sc);
   11403 		delay(10);
   11404 		CSR_WRITE(sc, WMREG_CTRL, v);
   11405 		CSR_WRITE_FLUSH(sc);
   11406 		delay(10);
   11407 	}
   11408 }
   11409 
   11410 static uint16_t
   11411 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11412 {
   11413 	uint32_t v, i;
   11414 	uint16_t data = 0;
   11415 
   11416 	v = CSR_READ(sc, WMREG_CTRL);
   11417 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11418 	v |= CTRL_SWDPIO(3);
   11419 
   11420 	CSR_WRITE(sc, WMREG_CTRL, v);
   11421 	CSR_WRITE_FLUSH(sc);
   11422 	delay(10);
   11423 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11424 	CSR_WRITE_FLUSH(sc);
   11425 	delay(10);
   11426 	CSR_WRITE(sc, WMREG_CTRL, v);
   11427 	CSR_WRITE_FLUSH(sc);
   11428 	delay(10);
   11429 
   11430 	for (i = 0; i < 16; i++) {
   11431 		data <<= 1;
   11432 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11433 		CSR_WRITE_FLUSH(sc);
   11434 		delay(10);
   11435 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11436 			data |= 1;
   11437 		CSR_WRITE(sc, WMREG_CTRL, v);
   11438 		CSR_WRITE_FLUSH(sc);
   11439 		delay(10);
   11440 	}
   11441 
   11442 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11443 	CSR_WRITE_FLUSH(sc);
   11444 	delay(10);
   11445 	CSR_WRITE(sc, WMREG_CTRL, v);
   11446 	CSR_WRITE_FLUSH(sc);
   11447 	delay(10);
   11448 
   11449 	return data;
   11450 }
   11451 
   11452 #undef MDI_IO
   11453 #undef MDI_DIR
   11454 #undef MDI_CLK
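
/*
 * The helpers above bit-bang what is essentially an IEEE 802.3 clause 22
 * management frame on the software-controlled MDI pins: each bit is
 * presented on MDI_IO and latched with a ~10us MDI_CLK high/low cycle.
 * For example, a read in wm_gmii_i82543_readreg() below sends a 32-bit
 * preamble of ones followed by the 14-bit header
 *
 *	(MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
 *	    (phy << 5) | reg
 *
 * before clocking 16 data bits back in with wm_i82543_mii_recvbits().
 */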
   11455 
   11456 /*
   11457  * wm_gmii_i82543_readreg:	[mii interface function]
   11458  *
   11459  *	Read a PHY register on the GMII (i82543 version).
   11460  */
   11461 static int
   11462 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11463 {
   11464 	struct wm_softc *sc = device_private(dev);
   11465 
   11466 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11467 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11468 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11469 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11470 
   11471 	DPRINTF(sc, WM_DEBUG_GMII,
   11472 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11473 		device_xname(dev), phy, reg, *val));
   11474 
   11475 	return 0;
   11476 }
   11477 
   11478 /*
   11479  * wm_gmii_i82543_writereg:	[mii interface function]
   11480  *
   11481  *	Write a PHY register on the GMII (i82543 version).
   11482  */
   11483 static int
   11484 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11485 {
   11486 	struct wm_softc *sc = device_private(dev);
   11487 
   11488 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11489 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11490 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11491 	    (MII_COMMAND_START << 30), 32);
   11492 
   11493 	return 0;
   11494 }
   11495 
   11496 /*
   11497  * wm_gmii_mdic_readreg:	[mii interface function]
   11498  *
   11499  *	Read a PHY register on the GMII.
   11500  */
   11501 static int
   11502 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11503 {
   11504 	struct wm_softc *sc = device_private(dev);
   11505 	uint32_t mdic = 0;
   11506 	int i;
   11507 
   11508 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11509 	    && (reg > MII_ADDRMASK)) {
   11510 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11511 		    __func__, sc->sc_phytype, reg);
   11512 		reg &= MII_ADDRMASK;
   11513 	}
   11514 
   11515 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11516 	    MDIC_REGADD(reg));
   11517 
   11518 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11519 		delay(50);
   11520 		mdic = CSR_READ(sc, WMREG_MDIC);
   11521 		if (mdic & MDIC_READY)
   11522 			break;
   11523 	}
   11524 
   11525 	if ((mdic & MDIC_READY) == 0) {
   11526 		DPRINTF(sc, WM_DEBUG_GMII,
   11527 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11528 			device_xname(dev), phy, reg));
   11529 		return ETIMEDOUT;
   11530 	} else if (mdic & MDIC_E) {
   11531 		/* This is normal if no PHY is present. */
   11532 		DPRINTF(sc, WM_DEBUG_GMII,
   11533 		    ("%s: MDIC read error: phy %d reg %d\n",
   11534 			device_xname(sc->sc_dev), phy, reg));
   11535 		return -1;
   11536 	} else
   11537 		*val = MDIC_DATA(mdic);
   11538 
   11539 	/*
   11540 	 * Allow some time after each MDIC transaction to avoid
   11541 	 * reading duplicate data in the next MDIC transaction.
   11542 	 */
   11543 	if (sc->sc_type == WM_T_PCH2)
   11544 		delay(100);
   11545 
   11546 	return 0;
   11547 }
   11548 
   11549 /*
   11550  * wm_gmii_mdic_writereg:	[mii interface function]
   11551  *
   11552  *	Write a PHY register on the GMII.
   11553  */
   11554 static int
   11555 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11556 {
   11557 	struct wm_softc *sc = device_private(dev);
   11558 	uint32_t mdic = 0;
   11559 	int i;
   11560 
   11561 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11562 	    && (reg > MII_ADDRMASK)) {
   11563 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11564 		    __func__, sc->sc_phytype, reg);
   11565 		reg &= MII_ADDRMASK;
   11566 	}
   11567 
   11568 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11569 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11570 
   11571 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11572 		delay(50);
   11573 		mdic = CSR_READ(sc, WMREG_MDIC);
   11574 		if (mdic & MDIC_READY)
   11575 			break;
   11576 	}
   11577 
   11578 	if ((mdic & MDIC_READY) == 0) {
   11579 		DPRINTF(sc, WM_DEBUG_GMII,
   11580 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11581 			device_xname(dev), phy, reg));
   11582 		return ETIMEDOUT;
   11583 	} else if (mdic & MDIC_E) {
   11584 		DPRINTF(sc, WM_DEBUG_GMII,
   11585 		    ("%s: MDIC write error: phy %d reg %d\n",
   11586 			device_xname(dev), phy, reg));
   11587 		return -1;
   11588 	}
   11589 
   11590 	/*
   11591 	 * Allow some time after each MDIC transaction to avoid
   11592 	 * reading duplicate data in the next MDIC transaction.
   11593 	 */
   11594 	if (sc->sc_type == WM_T_PCH2)
   11595 		delay(100);
   11596 
   11597 	return 0;
   11598 }
   11599 
   11600 /*
   11601  * wm_gmii_i82544_readreg:	[mii interface function]
   11602  *
   11603  *	Read a PHY register on the GMII.
   11604  */
   11605 static int
   11606 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11607 {
   11608 	struct wm_softc *sc = device_private(dev);
   11609 	int rv;
   11610 
   11611 	if (sc->phy.acquire(sc)) {
   11612 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11613 		return -1;
   11614 	}
   11615 
   11616 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11617 
   11618 	sc->phy.release(sc);
   11619 
   11620 	return rv;
   11621 }
   11622 
   11623 static int
   11624 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11625 {
   11626 	struct wm_softc *sc = device_private(dev);
   11627 	int rv;
   11628 
   11629 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11630 		switch (sc->sc_phytype) {
   11631 		case WMPHY_IGP:
   11632 		case WMPHY_IGP_2:
   11633 		case WMPHY_IGP_3:
   11634 			rv = wm_gmii_mdic_writereg(dev, phy,
   11635 			    IGPHY_PAGE_SELECT, reg);
   11636 			if (rv != 0)
   11637 				return rv;
   11638 			break;
   11639 		default:
   11640 #ifdef WM_DEBUG
   11641 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11642 			    __func__, sc->sc_phytype, reg);
   11643 #endif
   11644 			break;
   11645 		}
   11646 	}
   11647 
   11648 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11649 }
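
/*
 * Note (applies to both wm_gmii_i82544_{read,write}reg_locked()): for IGP
 * PHYs, registers above BME1000_MAX_MULTI_PAGE_REG are reached by first
 * writing the full page-encoded register number to IGPHY_PAGE_SELECT and
 * then performing the MDIC access with only the low bits
 * (reg & MII_ADDRMASK).
 */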
   11650 
   11651 /*
   11652  * wm_gmii_i82544_writereg:	[mii interface function]
   11653  *
   11654  *	Write a PHY register on the GMII.
   11655  */
   11656 static int
   11657 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11658 {
   11659 	struct wm_softc *sc = device_private(dev);
   11660 	int rv;
   11661 
   11662 	if (sc->phy.acquire(sc)) {
   11663 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11664 		return -1;
   11665 	}
   11666 
   11667 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11668 	sc->phy.release(sc);
   11669 
   11670 	return rv;
   11671 }
   11672 
   11673 static int
   11674 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11675 {
   11676 	struct wm_softc *sc = device_private(dev);
   11677 	int rv;
   11678 
   11679 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11680 		switch (sc->sc_phytype) {
   11681 		case WMPHY_IGP:
   11682 		case WMPHY_IGP_2:
   11683 		case WMPHY_IGP_3:
   11684 			rv = wm_gmii_mdic_writereg(dev, phy,
   11685 			    IGPHY_PAGE_SELECT, reg);
   11686 			if (rv != 0)
   11687 				return rv;
   11688 			break;
   11689 		default:
   11690 #ifdef WM_DEBUG
   11691 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11692 			    __func__, sc->sc_phytype, reg);
   11693 #endif
   11694 			break;
   11695 		}
   11696 	}
   11697 
   11698 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11699 }
   11700 
   11701 /*
   11702  * wm_gmii_i80003_readreg:	[mii interface function]
   11703  *
   11704  *	Read a PHY register on the Kumeran bus.
   11705  * This could be handled by the PHY layer if we didn't have to lock the
   11706  * resource ...
   11707  */
   11708 static int
   11709 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11710 {
   11711 	struct wm_softc *sc = device_private(dev);
   11712 	int page_select;
   11713 	uint16_t temp, temp2;
   11714 	int rv = 0;
   11715 
   11716 	if (phy != 1) /* Only one PHY on kumeran bus */
   11717 		return -1;
   11718 
   11719 	if (sc->phy.acquire(sc)) {
   11720 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11721 		return -1;
   11722 	}
   11723 
   11724 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11725 		page_select = GG82563_PHY_PAGE_SELECT;
   11726 	else {
   11727 		/*
   11728 		 * Use Alternative Page Select register to access registers
   11729 		 * 30 and 31.
   11730 		 */
   11731 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11732 	}
   11733 	temp = reg >> GG82563_PAGE_SHIFT;
   11734 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11735 		goto out;
   11736 
   11737 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11738 		/*
   11739 		 * Wait an extra 200us to work around a bug in the MDIC
   11740 		 * register's ready bit.
   11741 		 */
   11742 		delay(200);
   11743 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11744 		if ((rv != 0) || (temp2 != temp)) {
   11745 			device_printf(dev, "%s failed\n", __func__);
   11746 			rv = -1;
   11747 			goto out;
   11748 		}
   11749 		delay(200);
   11750 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11751 		delay(200);
   11752 	} else
   11753 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11754 
   11755 out:
   11756 	sc->phy.release(sc);
   11757 	return rv;
   11758 }
   11759 
   11760 /*
   11761  * wm_gmii_i80003_writereg:	[mii interface function]
   11762  *
   11763  *	Write a PHY register on the Kumeran bus.
   11764  * This could be handled by the PHY layer if we didn't have to lock the
   11765  * resource ...
   11766  */
   11767 static int
   11768 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11769 {
   11770 	struct wm_softc *sc = device_private(dev);
   11771 	int page_select, rv;
   11772 	uint16_t temp, temp2;
   11773 
   11774 	if (phy != 1) /* Only one PHY on kumeran bus */
   11775 		return -1;
   11776 
   11777 	if (sc->phy.acquire(sc)) {
   11778 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11779 		return -1;
   11780 	}
   11781 
   11782 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11783 		page_select = GG82563_PHY_PAGE_SELECT;
   11784 	else {
   11785 		/*
   11786 		 * Use Alternative Page Select register to access registers
   11787 		 * 30 and 31.
   11788 		 */
   11789 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11790 	}
   11791 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11792 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11793 		goto out;
   11794 
   11795 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11796 		/*
   11797 		 * Wait an extra 200us to work around a bug in the MDIC
   11798 		 * register's ready bit.
   11799 		 */
   11800 		delay(200);
   11801 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11802 		if ((rv != 0) || (temp2 != temp)) {
   11803 			device_printf(dev, "%s failed\n", __func__);
   11804 			rv = -1;
   11805 			goto out;
   11806 		}
   11807 		delay(200);
   11808 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11809 		delay(200);
   11810 	} else
   11811 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11812 
   11813 out:
   11814 	sc->phy.release(sc);
   11815 	return rv;
   11816 }
   11817 
   11818 /*
   11819  * wm_gmii_bm_readreg:	[mii interface function]
   11820  *
   11821  *	Read a PHY register on the BM PHY.
   11822  * This could be handled by the PHY layer if we didn't have to lock the
   11823  * resource ...
   11824  */
   11825 static int
   11826 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11827 {
   11828 	struct wm_softc *sc = device_private(dev);
   11829 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11830 	int rv;
   11831 
   11832 	if (sc->phy.acquire(sc)) {
   11833 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11834 		return -1;
   11835 	}
   11836 
   11837 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11838 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11839 		    || (reg == 31)) ? 1 : phy;
   11840 	/* Page 800 works differently than the rest so it has its own func */
   11841 	if (page == BM_WUC_PAGE) {
   11842 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11843 		goto release;
   11844 	}
   11845 
   11846 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11847 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11848 		    && (sc->sc_type != WM_T_82583))
   11849 			rv = wm_gmii_mdic_writereg(dev, phy,
   11850 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11851 		else
   11852 			rv = wm_gmii_mdic_writereg(dev, phy,
   11853 			    BME1000_PHY_PAGE_SELECT, page);
   11854 		if (rv != 0)
   11855 			goto release;
   11856 	}
   11857 
   11858 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11859 
   11860 release:
   11861 	sc->phy.release(sc);
   11862 	return rv;
   11863 }
   11864 
   11865 /*
   11866  * wm_gmii_bm_writereg:	[mii interface function]
   11867  *
   11868  *	Write a PHY register on the BM PHY.
   11869  * This could be handled by the PHY layer if we didn't have to lock the
   11870  * resource ...
   11871  */
   11872 static int
   11873 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11874 {
   11875 	struct wm_softc *sc = device_private(dev);
   11876 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11877 	int rv;
   11878 
   11879 	if (sc->phy.acquire(sc)) {
   11880 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11881 		return -1;
   11882 	}
   11883 
   11884 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11885 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11886 		    || (reg == 31)) ? 1 : phy;
   11887 	/* Page 800 works differently than the rest so it has its own func */
   11888 	if (page == BM_WUC_PAGE) {
   11889 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11890 		goto release;
   11891 	}
   11892 
   11893 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11894 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11895 		    && (sc->sc_type != WM_T_82583))
   11896 			rv = wm_gmii_mdic_writereg(dev, phy,
   11897 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11898 		else
   11899 			rv = wm_gmii_mdic_writereg(dev, phy,
   11900 			    BME1000_PHY_PAGE_SELECT, page);
   11901 		if (rv != 0)
   11902 			goto release;
   11903 	}
   11904 
   11905 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11906 
   11907 release:
   11908 	sc->phy.release(sc);
   11909 	return rv;
   11910 }
   11911 
   11912 /*
   11913  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11914  *  @dev: pointer to the HW structure
   11915  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11916  *
   11917  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11918  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11919  */
   11920 static int
   11921 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11922 {
   11923 #ifdef WM_DEBUG
   11924 	struct wm_softc *sc = device_private(dev);
   11925 #endif
   11926 	uint16_t temp;
   11927 	int rv;
   11928 
   11929 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11930 		device_xname(dev), __func__));
   11931 
   11932 	if (!phy_regp)
   11933 		return -1;
   11934 
   11935 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11936 
   11937 	/* Select Port Control Registers page */
   11938 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11939 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11940 	if (rv != 0)
   11941 		return rv;
   11942 
   11943 	/* Read WUCE and save it */
   11944 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11945 	if (rv != 0)
   11946 		return rv;
   11947 
   11948 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11949 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11950 	 */
   11951 	temp = *phy_regp;
   11952 	temp |= BM_WUC_ENABLE_BIT;
   11953 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11954 
   11955 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11956 		return rv;
   11957 
   11958 	/* Select Host Wakeup Registers page - the caller is now able to
   11959 	 * write registers on the Wakeup registers page.
   11960 	 */
   11961 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11962 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11963 }
   11964 
   11965 /*
   11966  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11967  *  @dev: pointer to the HW structure
   11968  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11969  *
   11970  *  Restore BM_WUC_ENABLE_REG to its original value.
   11971  *
   11972  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11973  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11974  *  caller.
   11975  */
   11976 static int
   11977 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11978 {
   11979 #ifdef WM_DEBUG
   11980 	struct wm_softc *sc = device_private(dev);
   11981 #endif
   11982 
   11983 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11984 		device_xname(dev), __func__));
   11985 
   11986 	if (!phy_regp)
   11987 		return -1;
   11988 
   11989 	/* Select Port Control Registers page */
   11990 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11991 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11992 
   11993 	/* Restore 769.17 to its original value */
   11994 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11995 
   11996 	return 0;
   11997 }
   11998 
   11999 /*
   12000  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   12001  *  @sc: pointer to the HW structure
   12002  *  @offset: register offset to be read or written
   12003  *  @val: pointer to the data to read or write
   12004  *  @rd: determines if operation is read or write
   12005  *  @page_set: BM_WUC_PAGE already set and access enabled
   12006  *
   12007  *  Read the PHY register at offset and store the retrieved information in
   12008  *  data, or write data to the PHY register at offset.  Note that the
   12009  *  procedure to access the PHY wakeup registers differs from that for
   12010  *  other PHY registers. It works as follows:
   12011  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   12012  *  2) Set page to 800 for host (801 for manageability firmware)
   12013  *  3) Write the address using the address opcode (0x11)
   12014  *  4) Read or write the data using the data opcode (0x12)
   12015  *  5) Restore 769.17.2 to its original value
   12016  *
   12017  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12018  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12019  *
   12020  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12021  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12022  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12023  */
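/*
 * A minimal usage sketch (the offset-encoding helper is hypothetical;
 * BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() below split the page and register
 * fields back out of 'offset'):
 *
 *	uint16_t data;
 *	// read one wakeup-page register; semaphore already held
 *	(void)wm_access_phy_wakeup_reg_bm(dev,
 *	    MAKE_BM_OFFSET(BM_WUC_PAGE, regnum), &data, true, false);
 */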
   12024 static int
   12025 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12026 	bool page_set)
   12027 {
   12028 	struct wm_softc *sc = device_private(dev);
   12029 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12030 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12031 	uint16_t wuce;
   12032 	int rv = 0;
   12033 
   12034 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12035 		device_xname(dev), __func__));
   12036 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12037 	if ((sc->sc_type == WM_T_PCH)
   12038 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12039 		device_printf(dev,
   12040 		    "Attempting to access page %d while gig enabled.\n", page);
   12041 	}
   12042 
   12043 	if (!page_set) {
   12044 		/* Enable access to PHY wakeup registers */
   12045 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12046 		if (rv != 0) {
   12047 			device_printf(dev,
   12048 			    "%s: Could not enable PHY wakeup reg access\n",
   12049 			    __func__);
   12050 			return rv;
   12051 		}
   12052 	}
   12053 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12054 		device_xname(sc->sc_dev), __func__, page, regnum));
   12055 
   12056 	/*
   12057 	 * Steps 3) and 4): access the PHY wakeup register.
   12058 	 * See the numbered procedure in the comment above this function.
   12059 	 */
   12060 
   12061 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12062 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12063 	if (rv != 0)
   12064 		return rv;
   12065 
   12066 	if (rd) {
   12067 		/* Read the Wakeup register page value using opcode 0x12 */
   12068 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12069 	} else {
   12070 		/* Write the Wakeup register page value using opcode 0x12 */
   12071 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12072 	}
   12073 	if (rv != 0)
   12074 		return rv;
   12075 
   12076 	if (!page_set)
   12077 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12078 
   12079 	return rv;
   12080 }
   12081 
   12082 /*
   12083  * wm_gmii_hv_readreg:	[mii interface function]
   12084  *
   12085  *	Read a PHY register on the HV (PCH and newer) PHY.
   12086  * This could be handled by the PHY layer if we didn't have to lock the
   12087  * resource ...
   12088  */
   12089 static int
   12090 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12091 {
   12092 	struct wm_softc *sc = device_private(dev);
   12093 	int rv;
   12094 
   12095 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12096 		device_xname(dev), __func__));
   12097 	if (sc->phy.acquire(sc)) {
   12098 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12099 		return -1;
   12100 	}
   12101 
   12102 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12103 	sc->phy.release(sc);
   12104 	return rv;
   12105 }
   12106 
   12107 static int
   12108 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12109 {
   12110 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12111 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12112 	int rv;
   12113 
   12114 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12115 
   12116 	/* Page 800 works differently than the rest so it has its own func */
   12117 	if (page == BM_WUC_PAGE)
   12118 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12119 
   12120 	/*
   12121 	 * Pages above 0 but below 768 work differently than the rest and
   12122 	 * are not supported here.
   12123 	 */
   12124 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12125 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12126 		return -1;
   12127 	}
   12128 
   12129 	/*
   12130 	 * XXX I21[789] documents say that the SMBus Address register is at
   12131 	 * PHY address 01, Page 0 (not 768), Register 26.
   12132 	 */
   12133 	if (page == HV_INTC_FC_PAGE_START)
   12134 		page = 0;
   12135 
   12136 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12137 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12138 		    page << BME1000_PAGE_SHIFT);
   12139 		if (rv != 0)
   12140 			return rv;
   12141 	}
   12142 
   12143 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12144 }
   12145 
   12146 /*
   12147  * wm_gmii_hv_writereg:	[mii interface function]
   12148  *
   12149  *	Write a PHY register on the HV (PCH and newer) PHY.
   12150  * This could be handled by the PHY layer if we didn't have to lock the
   12151  * resource ...
   12152  */
   12153 static int
   12154 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12155 {
   12156 	struct wm_softc *sc = device_private(dev);
   12157 	int rv;
   12158 
   12159 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12160 		device_xname(dev), __func__));
   12161 
   12162 	if (sc->phy.acquire(sc)) {
   12163 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12164 		return -1;
   12165 	}
   12166 
   12167 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12168 	sc->phy.release(sc);
   12169 
   12170 	return rv;
   12171 }
   12172 
   12173 static int
   12174 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12175 {
   12176 	struct wm_softc *sc = device_private(dev);
   12177 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12178 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12179 	int rv;
   12180 
   12181 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12182 
   12183 	/* Page 800 works differently than the rest so it has its own func */
   12184 	if (page == BM_WUC_PAGE)
   12185 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12186 		    false);
   12187 
   12188 	/*
   12189 	 * Pages above 0 but below 768 work differently than the rest and
   12190 	 * are not supported here.
   12191 	 */
   12192 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12193 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12194 		return -1;
   12195 	}
   12196 
   12197 	{
   12198 		/*
   12199 		 * XXX I21[789] documents say that the SMBus Address register
   12200 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12201 		 */
   12202 		if (page == HV_INTC_FC_PAGE_START)
   12203 			page = 0;
   12204 
   12205 		/*
   12206 		 * XXX Workaround MDIO accesses being disabled after entering
   12207 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12208 		 * register is set)
   12209 		 */
   12210 		if (sc->sc_phytype == WMPHY_82578) {
   12211 			struct mii_softc *child;
   12212 
   12213 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12214 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12215 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12216 			    && ((val & (1 << 11)) != 0)) {
   12217 				device_printf(dev, "XXX need workaround\n");
   12218 			}
   12219 		}
   12220 
   12221 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12222 			rv = wm_gmii_mdic_writereg(dev, 1,
   12223 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12224 			if (rv != 0)
   12225 				return rv;
   12226 		}
   12227 	}
   12228 
   12229 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12230 }
   12231 
   12232 /*
   12233  * wm_gmii_82580_readreg:	[mii interface function]
   12234  *
   12235  *	Read a PHY register on the 82580, I350 and I354.
   12236  * This could be handled by the PHY layer if we didn't have to lock the
   12237  * resource ...
   12238  */
   12239 static int
   12240 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12241 {
   12242 	struct wm_softc *sc = device_private(dev);
   12243 	int rv;
   12244 
   12245 	if (sc->phy.acquire(sc) != 0) {
   12246 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12247 		return -1;
   12248 	}
   12249 
   12250 #ifdef DIAGNOSTIC
   12251 	if (reg > MII_ADDRMASK) {
   12252 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12253 		    __func__, sc->sc_phytype, reg);
   12254 		reg &= MII_ADDRMASK;
   12255 	}
   12256 #endif
   12257 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12258 
   12259 	sc->phy.release(sc);
   12260 	return rv;
   12261 }
   12262 
   12263 /*
   12264  * wm_gmii_82580_writereg:	[mii interface function]
   12265  *
   12266  *	Write a PHY register on the 82580, I350 and I354.
   12267  * This could be handled by the PHY layer if we didn't have to lock the
   12268  * resource ...
   12269  */
   12270 static int
   12271 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12272 {
   12273 	struct wm_softc *sc = device_private(dev);
   12274 	int rv;
   12275 
   12276 	if (sc->phy.acquire(sc) != 0) {
   12277 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12278 		return -1;
   12279 	}
   12280 
   12281 #ifdef DIAGNOSTIC
   12282 	if (reg > MII_ADDRMASK) {
   12283 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12284 		    __func__, sc->sc_phytype, reg);
   12285 		reg &= MII_ADDRMASK;
   12286 	}
   12287 #endif
   12288 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12289 
   12290 	sc->phy.release(sc);
   12291 	return rv;
   12292 }
   12293 
   12294 /*
   12295  * wm_gmii_gs40g_readreg:	[mii interface function]
   12296  *
   12297  *	Read a PHY register on the I210 and I211.
   12298  * This could be handled by the PHY layer if we didn't have to lock the
   12299  * resource ...
   12300  */
   12301 static int
   12302 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12303 {
   12304 	struct wm_softc *sc = device_private(dev);
   12305 	int page, offset;
   12306 	int rv;
   12307 
   12308 	/* Acquire semaphore */
   12309 	if (sc->phy.acquire(sc)) {
   12310 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12311 		return -1;
   12312 	}
   12313 
   12314 	/* Page select */
   12315 	page = reg >> GS40G_PAGE_SHIFT;
   12316 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12317 	if (rv != 0)
   12318 		goto release;
   12319 
   12320 	/* Read reg */
   12321 	offset = reg & GS40G_OFFSET_MASK;
   12322 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12323 
   12324 release:
   12325 	sc->phy.release(sc);
   12326 	return rv;
   12327 }
   12328 
   12329 /*
   12330  * wm_gmii_gs40g_writereg:	[mii interface function]
   12331  *
   12332  *	Write a PHY register on the I210 and I211.
   12333  * This could be handled by the PHY layer if we didn't have to lock the
   12334  * resource ...
   12335  */
   12336 static int
   12337 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12338 {
   12339 	struct wm_softc *sc = device_private(dev);
   12340 	uint16_t page;
   12341 	int offset, rv;
   12342 
   12343 	/* Acquire semaphore */
   12344 	if (sc->phy.acquire(sc)) {
   12345 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12346 		return -1;
   12347 	}
   12348 
   12349 	/* Page select */
   12350 	page = reg >> GS40G_PAGE_SHIFT;
   12351 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12352 	if (rv != 0)
   12353 		goto release;
   12354 
   12355 	/* Write reg */
   12356 	offset = reg & GS40G_OFFSET_MASK;
   12357 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12358 
   12359 release:
   12360 	/* Release semaphore */
   12361 	sc->phy.release(sc);
   12362 	return rv;
   12363 }
   12364 
   12365 /*
   12366  * wm_gmii_statchg:	[mii interface function]
   12367  *
   12368  *	Callback from MII layer when media changes.
   12369  */
   12370 static void
   12371 wm_gmii_statchg(struct ifnet *ifp)
   12372 {
   12373 	struct wm_softc *sc = ifp->if_softc;
   12374 	struct mii_data *mii = &sc->sc_mii;
   12375 
   12376 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12377 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12378 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12379 
   12380 	/* Get flow control negotiation result. */
   12381 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12382 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12383 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12384 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12385 	}
   12386 
   12387 	if (sc->sc_flowflags & IFM_FLOW) {
   12388 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12389 			sc->sc_ctrl |= CTRL_TFCE;
   12390 			sc->sc_fcrtl |= FCRTL_XONE;
   12391 		}
   12392 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12393 			sc->sc_ctrl |= CTRL_RFCE;
   12394 	}
   12395 
   12396 	if (mii->mii_media_active & IFM_FDX) {
   12397 		DPRINTF(sc, WM_DEBUG_LINK,
   12398 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12399 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12400 	} else {
   12401 		DPRINTF(sc, WM_DEBUG_LINK,
   12402 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12403 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12404 	}
   12405 
   12406 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12407 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12408 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12409 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12410 	if (sc->sc_type == WM_T_80003) {
   12411 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12412 		case IFM_1000_T:
   12413 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12414 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12415 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12416 			break;
   12417 		default:
   12418 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12419 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12420 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12421 			break;
   12422 		}
   12423 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12424 	}
   12425 }
   12426 
   12427 /* kumeran related (80003, ICH* and PCH*) */
   12428 
   12429 /*
   12430  * wm_kmrn_readreg:
   12431  *
   12432  *	Read a kumeran register
   12433  */
   12434 static int
   12435 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12436 {
   12437 	int rv;
   12438 
   12439 	if (sc->sc_type == WM_T_80003)
   12440 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12441 	else
   12442 		rv = sc->phy.acquire(sc);
   12443 	if (rv != 0) {
   12444 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12445 		    __func__);
   12446 		return rv;
   12447 	}
   12448 
   12449 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12450 
   12451 	if (sc->sc_type == WM_T_80003)
   12452 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12453 	else
   12454 		sc->phy.release(sc);
   12455 
   12456 	return rv;
   12457 }
   12458 
   12459 static int
   12460 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12461 {
   12462 
   12463 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12464 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12465 	    KUMCTRLSTA_REN);
   12466 	CSR_WRITE_FLUSH(sc);
   12467 	delay(2);
   12468 
   12469 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12470 
   12471 	return 0;
   12472 }
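
/*
 * A Kumeran read is a small handshake on a single register: write the
 * offset with KUMCTRLSTA_REN set to request the read, flush, wait ~2us,
 * then read the data back from the low bits (KUMCTRLSTA_MASK) of the same
 * register.  Writes (below) place the data in the low bits directly and
 * need no REN bit.
 */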
   12473 
   12474 /*
   12475  * wm_kmrn_writereg:
   12476  *
   12477  *	Write a kumeran register
   12478  */
   12479 static int
   12480 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12481 {
   12482 	int rv;
   12483 
   12484 	if (sc->sc_type == WM_T_80003)
   12485 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12486 	else
   12487 		rv = sc->phy.acquire(sc);
   12488 	if (rv != 0) {
   12489 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12490 		    __func__);
   12491 		return rv;
   12492 	}
   12493 
   12494 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12495 
   12496 	if (sc->sc_type == WM_T_80003)
   12497 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12498 	else
   12499 		sc->phy.release(sc);
   12500 
   12501 	return rv;
   12502 }
   12503 
   12504 static int
   12505 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12506 {
   12507 
   12508 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12509 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12510 
   12511 	return 0;
   12512 }
   12513 
   12514 /*
   12515  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12516  * This access method is different from IEEE MMD.
   12517  */
   12518 static int
   12519 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12520 {
   12521 	struct wm_softc *sc = device_private(dev);
   12522 	int rv;
   12523 
   12524 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12525 	if (rv != 0)
   12526 		return rv;
   12527 
   12528 	if (rd)
   12529 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12530 	else
   12531 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12532 	return rv;
   12533 }
   12534 
   12535 static int
   12536 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12537 {
   12538 
   12539 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12540 }
   12541 
   12542 static int
   12543 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12544 {
   12545 
   12546 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12547 }
   12548 
   12549 /* SGMII related */
   12550 
   12551 /*
   12552  * wm_sgmii_uses_mdio
   12553  *
   12554  * Check whether the transaction is to the internal PHY or the external
   12555  * MDIO interface. Return true if it's MDIO.
   12556  */
   12557 static bool
   12558 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12559 {
   12560 	uint32_t reg;
   12561 	bool ismdio = false;
   12562 
   12563 	switch (sc->sc_type) {
   12564 	case WM_T_82575:
   12565 	case WM_T_82576:
   12566 		reg = CSR_READ(sc, WMREG_MDIC);
   12567 		ismdio = ((reg & MDIC_DEST) != 0);
   12568 		break;
   12569 	case WM_T_82580:
   12570 	case WM_T_I350:
   12571 	case WM_T_I354:
   12572 	case WM_T_I210:
   12573 	case WM_T_I211:
   12574 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12575 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12576 		break;
   12577 	default:
   12578 		break;
   12579 	}
   12580 
   12581 	return ismdio;
   12582 }
   12583 
   12584 /* Setup internal SGMII PHY for SFP */
   12585 static void
   12586 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12587 {
   12588 	uint16_t id1, id2, phyreg;
   12589 	int i, rv;
   12590 
   12591 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12592 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12593 		return;
   12594 
   12595 	for (i = 0; i < MII_NPHY; i++) {
   12596 		sc->phy.no_errprint = true;
   12597 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12598 		if (rv != 0)
   12599 			continue;
   12600 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12601 		if (rv != 0)
   12602 			continue;
   12603 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12604 			continue;
   12605 		sc->phy.no_errprint = false;
   12606 
   12607 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12608 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12609 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12610 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12611 		break;
   12612 	}
    12614 }
   12615 
   12616 /*
   12617  * wm_sgmii_readreg:	[mii interface function]
   12618  *
   12619  *	Read a PHY register on the SGMII
   12620  * This could be handled by the PHY layer if we didn't have to lock the
   12621  * resource ...
   12622  */
   12623 static int
   12624 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12625 {
   12626 	struct wm_softc *sc = device_private(dev);
   12627 	int rv;
   12628 
    12629 	if (sc->phy.acquire(sc) != 0) {
   12630 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12631 		return -1;
   12632 	}
   12633 
   12634 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12635 
   12636 	sc->phy.release(sc);
   12637 	return rv;
   12638 }
   12639 
   12640 static int
   12641 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12642 {
   12643 	struct wm_softc *sc = device_private(dev);
   12644 	uint32_t i2ccmd;
   12645 	int i, rv = 0;
   12646 
   12647 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12648 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12649 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12650 
   12651 	/* Poll the ready bit */
   12652 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12653 		delay(50);
   12654 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12655 		if (i2ccmd & I2CCMD_READY)
   12656 			break;
   12657 	}
   12658 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12659 		device_printf(dev, "I2CCMD Read did not complete\n");
   12660 		rv = ETIMEDOUT;
   12661 	}
   12662 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12663 		if (!sc->phy.no_errprint)
   12664 			device_printf(dev, "I2CCMD Error bit set\n");
   12665 		rv = EIO;
   12666 	}
   12667 
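          	/* The data arrives over I2C byte-swapped; swap it back. */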
   12668 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12669 
   12670 	return rv;
   12671 }
   12672 
   12673 /*
   12674  * wm_sgmii_writereg:	[mii interface function]
   12675  *
   12676  *	Write a PHY register on the SGMII.
   12677  * This could be handled by the PHY layer if we didn't have to lock the
   12678  * resource ...
   12679  */
   12680 static int
   12681 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12682 {
   12683 	struct wm_softc *sc = device_private(dev);
   12684 	int rv;
   12685 
   12686 	if (sc->phy.acquire(sc) != 0) {
   12687 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12688 		return -1;
   12689 	}
   12690 
   12691 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12692 
   12693 	sc->phy.release(sc);
   12694 
   12695 	return rv;
   12696 }
   12697 
   12698 static int
   12699 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12700 {
   12701 	struct wm_softc *sc = device_private(dev);
   12702 	uint32_t i2ccmd;
   12703 	uint16_t swapdata;
   12704 	int rv = 0;
   12705 	int i;
   12706 
   12707 	/* Swap the data bytes for the I2C interface */
   12708 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12709 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12710 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12711 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12712 
   12713 	/* Poll the ready bit */
   12714 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12715 		delay(50);
   12716 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12717 		if (i2ccmd & I2CCMD_READY)
   12718 			break;
   12719 	}
   12720 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12721 		device_printf(dev, "I2CCMD Write did not complete\n");
   12722 		rv = ETIMEDOUT;
   12723 	}
   12724 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12725 		device_printf(dev, "I2CCMD Error bit set\n");
   12726 		rv = EIO;
   12727 	}
   12728 
   12729 	return rv;
   12730 }
   12731 
   12732 /* TBI related */
   12733 
   12734 static bool
   12735 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12736 {
   12737 	bool sig;
   12738 
   12739 	sig = ctrl & CTRL_SWDPIN(1);
   12740 
   12741 	/*
   12742 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12743 	 * detect a signal, 1 if they don't.
   12744 	 */
   12745 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12746 		sig = !sig;
   12747 
   12748 	return sig;
   12749 }
   12750 
   12751 /*
   12752  * wm_tbi_mediainit:
   12753  *
   12754  *	Initialize media for use on 1000BASE-X devices.
   12755  */
   12756 static void
   12757 wm_tbi_mediainit(struct wm_softc *sc)
   12758 {
   12759 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12760 	const char *sep = "";
   12761 
   12762 	if (sc->sc_type < WM_T_82543)
   12763 		sc->sc_tipg = TIPG_WM_DFLT;
   12764 	else
   12765 		sc->sc_tipg = TIPG_LG_DFLT;
   12766 
   12767 	sc->sc_tbi_serdes_anegticks = 5;
   12768 
   12769 	/* Initialize our media structures */
   12770 	sc->sc_mii.mii_ifp = ifp;
   12771 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12772 
   12773 	ifp->if_baudrate = IF_Gbps(1);
   12774 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12775 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12776 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12777 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12778 		    sc->sc_core_lock);
   12779 	} else {
   12780 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12781 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12782 	}
   12783 
   12784 	/*
   12785 	 * SWD Pins:
   12786 	 *
   12787 	 *	0 = Link LED (output)
   12788 	 *	1 = Loss Of Signal (input)
   12789 	 */
   12790 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12791 
   12792 	/* XXX Perhaps this is only for TBI */
   12793 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12794 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12795 
   12796 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12797 		sc->sc_ctrl &= ~CTRL_LRST;
   12798 
   12799 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12800 
   12801 #define	ADD(ss, mm, dd)							  \
   12802 do {									  \
   12803 	aprint_normal("%s%s", sep, ss);					  \
   12804 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12805 	sep = ", ";							  \
   12806 } while (/*CONSTCOND*/0)
   12807 
   12808 	aprint_normal_dev(sc->sc_dev, "");
   12809 
   12810 	if (sc->sc_type == WM_T_I354) {
   12811 		uint32_t status;
   12812 
   12813 		status = CSR_READ(sc, WMREG_STATUS);
   12814 		if (((status & STATUS_2P5_SKU) != 0)
   12815 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12816 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12817 		} else
   12818 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12819 	} else if (sc->sc_type == WM_T_82545) {
   12820 		/* Only 82545 is LX (XXX except SFP) */
   12821 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12822 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12823 	} else if (sc->sc_sfptype != 0) {
   12824 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12825 		switch (sc->sc_sfptype) {
   12826 		default:
   12827 		case SFF_SFP_ETH_FLAGS_1000SX:
   12828 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12829 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12830 			break;
   12831 		case SFF_SFP_ETH_FLAGS_1000LX:
   12832 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12833 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12834 			break;
   12835 		case SFF_SFP_ETH_FLAGS_1000CX:
   12836 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12837 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12838 			break;
   12839 		case SFF_SFP_ETH_FLAGS_1000T:
   12840 			ADD("1000baseT", IFM_1000_T, 0);
   12841 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12842 			break;
   12843 		case SFF_SFP_ETH_FLAGS_100FX:
   12844 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12845 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12846 			break;
   12847 		}
   12848 	} else {
   12849 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12850 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12851 	}
   12852 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12853 	aprint_normal("\n");
   12854 
   12855 #undef ADD
   12856 
   12857 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12858 }
   12859 
   12860 /*
   12861  * wm_tbi_mediachange:	[ifmedia interface function]
   12862  *
   12863  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12864  */
   12865 static int
   12866 wm_tbi_mediachange(struct ifnet *ifp)
   12867 {
   12868 	struct wm_softc *sc = ifp->if_softc;
   12869 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12870 	uint32_t status, ctrl;
   12871 	bool signal;
   12872 	int i;
   12873 
   12874 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12875 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12876 		/* XXX need some work for >= 82571 and < 82575 */
   12877 		if (sc->sc_type < WM_T_82575)
   12878 			return 0;
   12879 	}
   12880 
   12881 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12882 	    || (sc->sc_type >= WM_T_82575))
   12883 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12884 
   12885 	sc->sc_ctrl &= ~CTRL_LRST;
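          	/*
          	 * Build the Transmit Config Word: enable autonegotiation and
          	 * advertise the duplex abilities implied by the selected
          	 * media; pause bits are added below when flow control is on.
          	 */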
   12886 	sc->sc_txcw = TXCW_ANE;
   12887 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12888 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12889 	else if (ife->ifm_media & IFM_FDX)
   12890 		sc->sc_txcw |= TXCW_FD;
   12891 	else
   12892 		sc->sc_txcw |= TXCW_HD;
   12893 
   12894 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12895 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12896 
    12897 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12898 		device_xname(sc->sc_dev), sc->sc_txcw));
   12899 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12900 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12901 	CSR_WRITE_FLUSH(sc);
   12902 	delay(1000);
   12903 
   12904 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12905 	signal = wm_tbi_havesignal(sc, ctrl);
   12906 
   12907 	DPRINTF(sc, WM_DEBUG_LINK,
   12908 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12909 
   12910 	if (signal) {
   12911 		/* Have signal; wait for the link to come up. */
   12912 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12913 			delay(10000);
   12914 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12915 				break;
   12916 		}
   12917 
   12918 		DPRINTF(sc, WM_DEBUG_LINK,
   12919 		    ("%s: i = %d after waiting for link\n",
   12920 			device_xname(sc->sc_dev), i));
   12921 
   12922 		status = CSR_READ(sc, WMREG_STATUS);
   12923 		DPRINTF(sc, WM_DEBUG_LINK,
   12924 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   12925 			__PRIxBIT "\n",
   12926 			device_xname(sc->sc_dev), status, STATUS_LU));
   12927 		if (status & STATUS_LU) {
   12928 			/* Link is up. */
   12929 			DPRINTF(sc, WM_DEBUG_LINK,
   12930 			    ("%s: LINK: set media -> link up %s\n",
   12931 				device_xname(sc->sc_dev),
   12932 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12933 
   12934 			/*
    12935 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    12936 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   12937 			 */
   12938 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12939 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12940 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12941 			if (status & STATUS_FD)
   12942 				sc->sc_tctl |=
   12943 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12944 			else
   12945 				sc->sc_tctl |=
   12946 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12947 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12948 				sc->sc_fcrtl |= FCRTL_XONE;
   12949 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12950 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12951 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12952 			sc->sc_tbi_linkup = 1;
   12953 		} else {
   12954 			if (i == WM_LINKUP_TIMEOUT)
   12955 				wm_check_for_link(sc);
   12956 			/* Link is down. */
   12957 			DPRINTF(sc, WM_DEBUG_LINK,
   12958 			    ("%s: LINK: set media -> link down\n",
   12959 				device_xname(sc->sc_dev)));
   12960 			sc->sc_tbi_linkup = 0;
   12961 		}
   12962 	} else {
   12963 		DPRINTF(sc, WM_DEBUG_LINK,
   12964 		    ("%s: LINK: set media -> no signal\n",
   12965 			device_xname(sc->sc_dev)));
   12966 		sc->sc_tbi_linkup = 0;
   12967 	}
   12968 
   12969 	wm_tbi_serdes_set_linkled(sc);
   12970 
   12971 	return 0;
   12972 }
   12973 
   12974 /*
   12975  * wm_tbi_mediastatus:	[ifmedia interface function]
   12976  *
   12977  *	Get the current interface media status on a 1000BASE-X device.
   12978  */
   12979 static void
   12980 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12981 {
   12982 	struct wm_softc *sc = ifp->if_softc;
   12983 	uint32_t ctrl, status;
   12984 
   12985 	ifmr->ifm_status = IFM_AVALID;
   12986 	ifmr->ifm_active = IFM_ETHER;
   12987 
   12988 	status = CSR_READ(sc, WMREG_STATUS);
   12989 	if ((status & STATUS_LU) == 0) {
   12990 		ifmr->ifm_active |= IFM_NONE;
   12991 		return;
   12992 	}
   12993 
   12994 	ifmr->ifm_status |= IFM_ACTIVE;
   12995 	/* Only 82545 is LX */
   12996 	if (sc->sc_type == WM_T_82545)
   12997 		ifmr->ifm_active |= IFM_1000_LX;
   12998 	else
   12999 		ifmr->ifm_active |= IFM_1000_SX;
   13000 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13001 		ifmr->ifm_active |= IFM_FDX;
   13002 	else
   13003 		ifmr->ifm_active |= IFM_HDX;
   13004 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13005 	if (ctrl & CTRL_RFCE)
   13006 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13007 	if (ctrl & CTRL_TFCE)
   13008 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13009 }
   13010 
   13011 /* XXX TBI only */
   13012 static int
   13013 wm_check_for_link(struct wm_softc *sc)
   13014 {
   13015 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13016 	uint32_t rxcw;
   13017 	uint32_t ctrl;
   13018 	uint32_t status;
   13019 	bool signal;
   13020 
   13021 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13022 		device_xname(sc->sc_dev), __func__));
   13023 
   13024 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13025 		/* XXX need some work for >= 82571 */
   13026 		if (sc->sc_type >= WM_T_82571) {
   13027 			sc->sc_tbi_linkup = 1;
   13028 			return 0;
   13029 		}
   13030 	}
   13031 
   13032 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13033 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13034 	status = CSR_READ(sc, WMREG_STATUS);
   13035 	signal = wm_tbi_havesignal(sc, ctrl);
   13036 
   13037 	DPRINTF(sc, WM_DEBUG_LINK,
   13038 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13039 		device_xname(sc->sc_dev), __func__, signal,
   13040 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13041 
   13042 	/*
   13043 	 * SWDPIN   LU RXCW
   13044 	 *	0    0	  0
   13045 	 *	0    0	  1	(should not happen)
   13046 	 *	0    1	  0	(should not happen)
   13047 	 *	0    1	  1	(should not happen)
   13048 	 *	1    0	  0	Disable autonego and force linkup
   13049 	 *	1    0	  1	got /C/ but not linkup yet
   13050 	 *	1    1	  0	(linkup)
   13051 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13052 	 *
   13053 	 */
   13054 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13055 		DPRINTF(sc, WM_DEBUG_LINK,
   13056 		    ("%s: %s: force linkup and fullduplex\n",
   13057 			device_xname(sc->sc_dev), __func__));
   13058 		sc->sc_tbi_linkup = 0;
   13059 		/* Disable auto-negotiation in the TXCW register */
   13060 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13061 
   13062 		/*
   13063 		 * Force link-up and also force full-duplex.
   13064 		 *
    13065 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    13066 		 * automatically, so refresh sc->sc_ctrl from the read value.
   13067 		 */
   13068 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13069 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13070 	} else if (((status & STATUS_LU) != 0)
   13071 	    && ((rxcw & RXCW_C) != 0)
   13072 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13073 		sc->sc_tbi_linkup = 1;
   13074 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13075 			device_xname(sc->sc_dev), __func__));
   13076 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13077 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13078 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13079 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13080 			device_xname(sc->sc_dev), __func__));
   13081 	} else {
   13082 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13083 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13084 			status));
   13085 	}
   13086 
   13087 	return 0;
   13088 }
   13089 
   13090 /*
   13091  * wm_tbi_tick:
   13092  *
   13093  *	Check the link on TBI devices.
   13094  *	This function acts as mii_tick().
   13095  */
   13096 static void
   13097 wm_tbi_tick(struct wm_softc *sc)
   13098 {
   13099 	struct mii_data *mii = &sc->sc_mii;
   13100 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13101 	uint32_t status;
   13102 
   13103 	KASSERT(WM_CORE_LOCKED(sc));
   13104 
   13105 	status = CSR_READ(sc, WMREG_STATUS);
   13106 
   13107 	/* XXX is this needed? */
   13108 	(void)CSR_READ(sc, WMREG_RXCW);
   13109 	(void)CSR_READ(sc, WMREG_CTRL);
   13110 
   13111 	/* set link status */
   13112 	if ((status & STATUS_LU) == 0) {
   13113 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13114 			device_xname(sc->sc_dev)));
   13115 		sc->sc_tbi_linkup = 0;
   13116 	} else if (sc->sc_tbi_linkup == 0) {
   13117 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13118 			device_xname(sc->sc_dev),
   13119 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13120 		sc->sc_tbi_linkup = 1;
   13121 		sc->sc_tbi_serdes_ticks = 0;
   13122 	}
   13123 
   13124 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   13125 		goto setled;
   13126 
   13127 	if ((status & STATUS_LU) == 0) {
   13128 		sc->sc_tbi_linkup = 0;
   13129 		/* If the timer expired, retry autonegotiation */
   13130 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13131 		    && (++sc->sc_tbi_serdes_ticks
   13132 			>= sc->sc_tbi_serdes_anegticks)) {
   13133 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13134 				device_xname(sc->sc_dev), __func__));
   13135 			sc->sc_tbi_serdes_ticks = 0;
   13136 			/*
   13137 			 * Reset the link, and let autonegotiation do
   13138 			 * its thing
   13139 			 */
   13140 			sc->sc_ctrl |= CTRL_LRST;
   13141 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13142 			CSR_WRITE_FLUSH(sc);
   13143 			delay(1000);
   13144 			sc->sc_ctrl &= ~CTRL_LRST;
   13145 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13146 			CSR_WRITE_FLUSH(sc);
   13147 			delay(1000);
   13148 			CSR_WRITE(sc, WMREG_TXCW,
   13149 			    sc->sc_txcw & ~TXCW_ANE);
   13150 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13151 		}
   13152 	}
   13153 
   13154 setled:
   13155 	wm_tbi_serdes_set_linkled(sc);
   13156 }
   13157 
   13158 /* SERDES related */
   13159 static void
   13160 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13161 {
   13162 	uint32_t reg;
   13163 
   13164 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13165 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13166 		return;
   13167 
   13168 	/* Enable PCS to turn on link */
   13169 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13170 	reg |= PCS_CFG_PCS_EN;
   13171 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13172 
   13173 	/* Power up the laser */
   13174 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13175 	reg &= ~CTRL_EXT_SWDPIN(3);
   13176 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13177 
   13178 	/* Flush the write to verify completion */
   13179 	CSR_WRITE_FLUSH(sc);
   13180 	delay(1000);
   13181 }
   13182 
   13183 static int
   13184 wm_serdes_mediachange(struct ifnet *ifp)
   13185 {
   13186 	struct wm_softc *sc = ifp->if_softc;
   13187 	bool pcs_autoneg = true; /* XXX */
   13188 	uint32_t ctrl_ext, pcs_lctl, reg;
   13189 
   13190 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13191 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13192 		return 0;
   13193 
   13194 	/* XXX Currently, this function is not called on 8257[12] */
   13195 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13196 	    || (sc->sc_type >= WM_T_82575))
   13197 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13198 
   13199 	/* Power on the sfp cage if present */
   13200 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13201 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13202 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13203 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13204 
   13205 	sc->sc_ctrl |= CTRL_SLU;
   13206 
   13207 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13208 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13209 
   13210 		reg = CSR_READ(sc, WMREG_CONNSW);
   13211 		reg |= CONNSW_ENRGSRC;
   13212 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13213 	}
   13214 
   13215 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13216 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13217 	case CTRL_EXT_LINK_MODE_SGMII:
   13218 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13219 		pcs_autoneg = true;
   13220 		/* Autoneg time out should be disabled for SGMII mode */
   13221 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13222 		break;
   13223 	case CTRL_EXT_LINK_MODE_1000KX:
   13224 		pcs_autoneg = false;
   13225 		/* FALLTHROUGH */
   13226 	default:
   13227 		if ((sc->sc_type == WM_T_82575)
   13228 		    || (sc->sc_type == WM_T_82576)) {
   13229 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13230 				pcs_autoneg = false;
   13231 		}
   13232 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13233 		    | CTRL_FRCFDX;
   13234 
   13235 		/* Set speed of 1000/Full if speed/duplex is forced */
   13236 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13237 	}
   13238 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13239 
   13240 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13241 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13242 
   13243 	if (pcs_autoneg) {
   13244 		/* Set PCS register for autoneg */
   13245 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13246 
   13247 		/* Disable force flow control for autoneg */
   13248 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13249 
   13250 		/* Configure flow control advertisement for autoneg */
   13251 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13252 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13253 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13254 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13255 	} else
   13256 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13257 
   13258 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13259 
   13260 	return 0;
   13261 }
   13262 
   13263 static void
   13264 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13265 {
   13266 	struct wm_softc *sc = ifp->if_softc;
   13267 	struct mii_data *mii = &sc->sc_mii;
   13268 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13269 	uint32_t pcs_adv, pcs_lpab, reg;
   13270 
   13271 	ifmr->ifm_status = IFM_AVALID;
   13272 	ifmr->ifm_active = IFM_ETHER;
   13273 
   13274 	/* Check PCS */
   13275 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13276 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13277 		ifmr->ifm_active |= IFM_NONE;
   13278 		sc->sc_tbi_linkup = 0;
   13279 		goto setled;
   13280 	}
   13281 
   13282 	sc->sc_tbi_linkup = 1;
   13283 	ifmr->ifm_status |= IFM_ACTIVE;
   13284 	if (sc->sc_type == WM_T_I354) {
   13285 		uint32_t status;
   13286 
   13287 		status = CSR_READ(sc, WMREG_STATUS);
   13288 		if (((status & STATUS_2P5_SKU) != 0)
   13289 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13290 			ifmr->ifm_active |= IFM_2500_KX;
   13291 		} else
   13292 			ifmr->ifm_active |= IFM_1000_KX;
   13293 	} else {
   13294 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13295 		case PCS_LSTS_SPEED_10:
   13296 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13297 			break;
   13298 		case PCS_LSTS_SPEED_100:
   13299 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13300 			break;
   13301 		case PCS_LSTS_SPEED_1000:
   13302 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13303 			break;
   13304 		default:
   13305 			device_printf(sc->sc_dev, "Unknown speed\n");
   13306 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13307 			break;
   13308 		}
   13309 	}
   13310 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13311 	if ((reg & PCS_LSTS_FDX) != 0)
   13312 		ifmr->ifm_active |= IFM_FDX;
   13313 	else
   13314 		ifmr->ifm_active |= IFM_HDX;
   13315 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13316 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13317 		/* Check flow */
   13318 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13319 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13320 			DPRINTF(sc, WM_DEBUG_LINK,
   13321 			    ("XXX LINKOK but not ACOMP\n"));
   13322 			goto setled;
   13323 		}
   13324 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13325 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13326 		DPRINTF(sc, WM_DEBUG_LINK,
   13327 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
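          		/*
          		 * Resolve flow control from the local and link-partner
          		 * pause bits, in the spirit of the standard pause
          		 * resolution table (802.3 Annex 28B).
          		 */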
   13328 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13329 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13330 			mii->mii_media_active |= IFM_FLOW
   13331 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13332 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13333 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13334 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13335 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13336 			mii->mii_media_active |= IFM_FLOW
   13337 			    | IFM_ETH_TXPAUSE;
   13338 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13339 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13340 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13341 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13342 			mii->mii_media_active |= IFM_FLOW
   13343 			    | IFM_ETH_RXPAUSE;
   13344 		}
   13345 	}
   13346 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13347 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13348 setled:
   13349 	wm_tbi_serdes_set_linkled(sc);
   13350 }
   13351 
   13352 /*
   13353  * wm_serdes_tick:
   13354  *
   13355  *	Check the link on serdes devices.
   13356  */
   13357 static void
   13358 wm_serdes_tick(struct wm_softc *sc)
   13359 {
   13360 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13361 	struct mii_data *mii = &sc->sc_mii;
   13362 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13363 	uint32_t reg;
   13364 
   13365 	KASSERT(WM_CORE_LOCKED(sc));
   13366 
   13367 	mii->mii_media_status = IFM_AVALID;
   13368 	mii->mii_media_active = IFM_ETHER;
   13369 
   13370 	/* Check PCS */
   13371 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13372 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13373 		mii->mii_media_status |= IFM_ACTIVE;
   13374 		sc->sc_tbi_linkup = 1;
   13375 		sc->sc_tbi_serdes_ticks = 0;
   13376 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13377 		if ((reg & PCS_LSTS_FDX) != 0)
   13378 			mii->mii_media_active |= IFM_FDX;
   13379 		else
   13380 			mii->mii_media_active |= IFM_HDX;
   13381 	} else {
   13382 		mii->mii_media_status |= IFM_NONE;
   13383 		sc->sc_tbi_linkup = 0;
   13384 		/* If the timer expired, retry autonegotiation */
   13385 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13386 		    && (++sc->sc_tbi_serdes_ticks
   13387 			>= sc->sc_tbi_serdes_anegticks)) {
   13388 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13389 				device_xname(sc->sc_dev), __func__));
   13390 			sc->sc_tbi_serdes_ticks = 0;
   13391 			/* XXX */
   13392 			wm_serdes_mediachange(ifp);
   13393 		}
   13394 	}
   13395 
   13396 	wm_tbi_serdes_set_linkled(sc);
   13397 }
   13398 
   13399 /* SFP related */
   13400 
   13401 static int
   13402 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13403 {
   13404 	uint32_t i2ccmd;
   13405 	int i;
   13406 
   13407 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13408 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13409 
   13410 	/* Poll the ready bit */
   13411 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13412 		delay(50);
   13413 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13414 		if (i2ccmd & I2CCMD_READY)
   13415 			break;
   13416 	}
   13417 	if ((i2ccmd & I2CCMD_READY) == 0)
   13418 		return -1;
   13419 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13420 		return -1;
   13421 
   13422 	*data = i2ccmd & 0x00ff;
   13423 
   13424 	return 0;
   13425 }
   13426 
   13427 static uint32_t
   13428 wm_sfp_get_media_type(struct wm_softc *sc)
   13429 {
   13430 	uint32_t ctrl_ext;
   13431 	uint8_t val = 0;
   13432 	int timeout = 3;
   13433 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13434 	int rv = -1;
   13435 
   13436 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13437 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13438 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13439 	CSR_WRITE_FLUSH(sc);
   13440 
   13441 	/* Read SFP module data */
   13442 	while (timeout) {
   13443 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13444 		if (rv == 0)
   13445 			break;
   13446 		delay(100*1000); /* XXX too big */
   13447 		timeout--;
   13448 	}
   13449 	if (rv != 0)
   13450 		goto out;
   13451 
   13452 	switch (val) {
   13453 	case SFF_SFP_ID_SFF:
   13454 		aprint_normal_dev(sc->sc_dev,
   13455 		    "Module/Connector soldered to board\n");
   13456 		break;
   13457 	case SFF_SFP_ID_SFP:
   13458 		sc->sc_flags |= WM_F_SFP;
   13459 		break;
   13460 	case SFF_SFP_ID_UNKNOWN:
   13461 		goto out;
   13462 	default:
   13463 		break;
   13464 	}
   13465 
   13466 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13467 	if (rv != 0)
   13468 		goto out;
   13469 
   13470 	sc->sc_sfptype = val;
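          	/*
          	 * Map the SFP ethernet compliance flags to a media type:
          	 * SX/LX modules drive the SerDes directly, while 1000BASE-T
          	 * and 100BASE-FX modules sit behind SGMII.
          	 */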
   13471 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13472 		mediatype = WM_MEDIATYPE_SERDES;
   13473 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13474 		sc->sc_flags |= WM_F_SGMII;
   13475 		mediatype = WM_MEDIATYPE_COPPER;
   13476 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13477 		sc->sc_flags |= WM_F_SGMII;
   13478 		mediatype = WM_MEDIATYPE_SERDES;
   13479 	} else {
   13480 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13481 		    __func__, sc->sc_sfptype);
   13482 		sc->sc_sfptype = 0; /* XXX unknown */
   13483 	}
   13484 
   13485 out:
   13486 	/* Restore I2C interface setting */
   13487 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13488 
   13489 	return mediatype;
   13490 }
   13491 
   13492 /*
   13493  * NVM related.
   13494  * Microwire, SPI (w/wo EERD) and Flash.
   13495  */
   13496 
   13497 /* Both spi and uwire */
   13498 
   13499 /*
   13500  * wm_eeprom_sendbits:
   13501  *
   13502  *	Send a series of bits to the EEPROM.
   13503  */
   13504 static void
   13505 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13506 {
   13507 	uint32_t reg;
   13508 	int x;
   13509 
   13510 	reg = CSR_READ(sc, WMREG_EECD);
   13511 
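          	/*
          	 * Bit-bang each bit out MSB first: present the bit on DI,
          	 * then pulse SK high and low with ~2us around each edge.
          	 */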
   13512 	for (x = nbits; x > 0; x--) {
   13513 		if (bits & (1U << (x - 1)))
   13514 			reg |= EECD_DI;
   13515 		else
   13516 			reg &= ~EECD_DI;
   13517 		CSR_WRITE(sc, WMREG_EECD, reg);
   13518 		CSR_WRITE_FLUSH(sc);
   13519 		delay(2);
   13520 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13521 		CSR_WRITE_FLUSH(sc);
   13522 		delay(2);
   13523 		CSR_WRITE(sc, WMREG_EECD, reg);
   13524 		CSR_WRITE_FLUSH(sc);
   13525 		delay(2);
   13526 	}
   13527 }
   13528 
   13529 /*
   13530  * wm_eeprom_recvbits:
   13531  *
   13532  *	Receive a series of bits from the EEPROM.
   13533  */
   13534 static void
   13535 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13536 {
   13537 	uint32_t reg, val;
   13538 	int x;
   13539 
   13540 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13541 
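          	/*
          	 * Bit-bang each bit in MSB first: raise SK, sample DO while
          	 * the clock is high, then drop SK again.
          	 */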
   13542 	val = 0;
   13543 	for (x = nbits; x > 0; x--) {
   13544 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13545 		CSR_WRITE_FLUSH(sc);
   13546 		delay(2);
   13547 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13548 			val |= (1U << (x - 1));
   13549 		CSR_WRITE(sc, WMREG_EECD, reg);
   13550 		CSR_WRITE_FLUSH(sc);
   13551 		delay(2);
   13552 	}
   13553 	*valp = val;
   13554 }
   13555 
   13556 /* Microwire */
   13557 
   13558 /*
   13559  * wm_nvm_read_uwire:
   13560  *
   13561  *	Read a word from the EEPROM using the MicroWire protocol.
   13562  */
   13563 static int
   13564 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13565 {
   13566 	uint32_t reg, val;
   13567 	int i;
   13568 
   13569 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13570 		device_xname(sc->sc_dev), __func__));
   13571 
   13572 	if (sc->nvm.acquire(sc) != 0)
   13573 		return -1;
   13574 
   13575 	for (i = 0; i < wordcnt; i++) {
   13576 		/* Clear SK and DI. */
   13577 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13578 		CSR_WRITE(sc, WMREG_EECD, reg);
   13579 
   13580 		/*
   13581 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13582 		 * and Xen.
   13583 		 *
   13584 		 * We use this workaround only for 82540 because qemu's
   13585 		 * e1000 act as 82540.
   13586 		 */
   13587 		if (sc->sc_type == WM_T_82540) {
   13588 			reg |= EECD_SK;
   13589 			CSR_WRITE(sc, WMREG_EECD, reg);
   13590 			reg &= ~EECD_SK;
   13591 			CSR_WRITE(sc, WMREG_EECD, reg);
   13592 			CSR_WRITE_FLUSH(sc);
   13593 			delay(2);
   13594 		}
   13595 		/* XXX: end of workaround */
   13596 
   13597 		/* Set CHIP SELECT. */
   13598 		reg |= EECD_CS;
   13599 		CSR_WRITE(sc, WMREG_EECD, reg);
   13600 		CSR_WRITE_FLUSH(sc);
   13601 		delay(2);
   13602 
   13603 		/* Shift in the READ command. */
   13604 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13605 
   13606 		/* Shift in address. */
   13607 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13608 
   13609 		/* Shift out the data. */
   13610 		wm_eeprom_recvbits(sc, &val, 16);
   13611 		data[i] = val & 0xffff;
   13612 
   13613 		/* Clear CHIP SELECT. */
   13614 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13615 		CSR_WRITE(sc, WMREG_EECD, reg);
   13616 		CSR_WRITE_FLUSH(sc);
   13617 		delay(2);
   13618 	}
   13619 
   13620 	sc->nvm.release(sc);
   13621 	return 0;
   13622 }
   13623 
   13624 /* SPI */
   13625 
   13626 /*
   13627  * Set SPI and FLASH related information from the EECD register.
   13628  * For 82541 and 82547, the word size is taken from EEPROM.
   13629  */
   13630 static int
   13631 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13632 {
   13633 	int size;
   13634 	uint32_t reg;
   13635 	uint16_t data;
   13636 
   13637 	reg = CSR_READ(sc, WMREG_EECD);
   13638 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13639 
   13640 	/* Read the size of NVM from EECD by default */
   13641 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13642 	switch (sc->sc_type) {
   13643 	case WM_T_82541:
   13644 	case WM_T_82541_2:
   13645 	case WM_T_82547:
   13646 	case WM_T_82547_2:
   13647 		/* Set dummy value to access EEPROM */
   13648 		sc->sc_nvm_wordsize = 64;
   13649 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13650 			aprint_error_dev(sc->sc_dev,
   13651 			    "%s: failed to read EEPROM size\n", __func__);
   13652 		}
   13653 		reg = data;
   13654 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13655 		if (size == 0)
   13656 			size = 6; /* 64 word size */
   13657 		else
   13658 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13659 		break;
   13660 	case WM_T_80003:
   13661 	case WM_T_82571:
   13662 	case WM_T_82572:
   13663 	case WM_T_82573: /* SPI case */
   13664 	case WM_T_82574: /* SPI case */
   13665 	case WM_T_82583: /* SPI case */
   13666 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13667 		if (size > 14)
   13668 			size = 14;
   13669 		break;
   13670 	case WM_T_82575:
   13671 	case WM_T_82576:
   13672 	case WM_T_82580:
   13673 	case WM_T_I350:
   13674 	case WM_T_I354:
   13675 	case WM_T_I210:
   13676 	case WM_T_I211:
   13677 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13678 		if (size > 15)
   13679 			size = 15;
   13680 		break;
   13681 	default:
   13682 		aprint_error_dev(sc->sc_dev,
   13683 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13684 		return -1;
   13685 		break;
   13686 	}
   13687 
   13688 	sc->sc_nvm_wordsize = 1 << size;
   13689 
   13690 	return 0;
   13691 }
   13692 
   13693 /*
   13694  * wm_nvm_ready_spi:
   13695  *
   13696  *	Wait for a SPI EEPROM to be ready for commands.
   13697  */
   13698 static int
   13699 wm_nvm_ready_spi(struct wm_softc *sc)
   13700 {
   13701 	uint32_t val;
   13702 	int usec;
   13703 
   13704 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13705 		device_xname(sc->sc_dev), __func__));
   13706 
   13707 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13708 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13709 		wm_eeprom_recvbits(sc, &val, 8);
   13710 		if ((val & SPI_SR_RDY) == 0)
   13711 			break;
   13712 	}
   13713 	if (usec >= SPI_MAX_RETRIES) {
    13714 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   13715 		return -1;
   13716 	}
   13717 	return 0;
   13718 }
   13719 
   13720 /*
   13721  * wm_nvm_read_spi:
   13722  *
    13723  *	Read a word from the EEPROM using the SPI protocol.
   13724  */
   13725 static int
   13726 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13727 {
   13728 	uint32_t reg, val;
   13729 	int i;
   13730 	uint8_t opc;
   13731 	int rv = 0;
   13732 
   13733 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13734 		device_xname(sc->sc_dev), __func__));
   13735 
   13736 	if (sc->nvm.acquire(sc) != 0)
   13737 		return -1;
   13738 
   13739 	/* Clear SK and CS. */
   13740 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13741 	CSR_WRITE(sc, WMREG_EECD, reg);
   13742 	CSR_WRITE_FLUSH(sc);
   13743 	delay(2);
   13744 
   13745 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13746 		goto out;
   13747 
   13748 	/* Toggle CS to flush commands. */
   13749 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13750 	CSR_WRITE_FLUSH(sc);
   13751 	delay(2);
   13752 	CSR_WRITE(sc, WMREG_EECD, reg);
   13753 	CSR_WRITE_FLUSH(sc);
   13754 	delay(2);
   13755 
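          	/*
          	 * SPI parts with 8-bit addressing encode the 9th address bit
          	 * (A8) in the opcode, so set it for words at or above 128.
          	 * The address sent below is a byte address, hence word << 1.
          	 */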
   13756 	opc = SPI_OPC_READ;
   13757 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13758 		opc |= SPI_OPC_A8;
   13759 
   13760 	wm_eeprom_sendbits(sc, opc, 8);
   13761 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13762 
   13763 	for (i = 0; i < wordcnt; i++) {
   13764 		wm_eeprom_recvbits(sc, &val, 16);
   13765 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13766 	}
   13767 
   13768 	/* Raise CS and clear SK. */
   13769 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13770 	CSR_WRITE(sc, WMREG_EECD, reg);
   13771 	CSR_WRITE_FLUSH(sc);
   13772 	delay(2);
   13773 
   13774 out:
   13775 	sc->nvm.release(sc);
   13776 	return rv;
   13777 }
   13778 
   13779 /* Using with EERD */
   13780 
   13781 static int
   13782 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13783 {
   13784 	uint32_t attempts = 100000;
   13785 	uint32_t i, reg = 0;
   13786 	int32_t done = -1;
   13787 
   13788 	for (i = 0; i < attempts; i++) {
   13789 		reg = CSR_READ(sc, rw);
   13790 
   13791 		if (reg & EERD_DONE) {
   13792 			done = 0;
   13793 			break;
   13794 		}
   13795 		delay(5);
   13796 	}
   13797 
   13798 	return done;
   13799 }
   13800 
   13801 static int
   13802 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13803 {
   13804 	int i, eerd = 0;
   13805 	int rv = 0;
   13806 
   13807 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13808 		device_xname(sc->sc_dev), __func__));
   13809 
   13810 	if (sc->nvm.acquire(sc) != 0)
   13811 		return -1;
   13812 
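          	/*
          	 * For each word, write the address and the START bit into
          	 * EERD, poll for DONE, then pull the result out of the data
          	 * field of the same register.
          	 */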
   13813 	for (i = 0; i < wordcnt; i++) {
   13814 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13815 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13816 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13817 		if (rv != 0) {
   13818 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13819 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13820 			break;
   13821 		}
   13822 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13823 	}
   13824 
   13825 	sc->nvm.release(sc);
   13826 	return rv;
   13827 }
   13828 
   13829 /* Flash */
   13830 
   13831 static int
   13832 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13833 {
   13834 	uint32_t eecd;
   13835 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13836 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13837 	uint32_t nvm_dword = 0;
   13838 	uint8_t sig_byte = 0;
   13839 	int rv;
   13840 
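          	/*
          	 * Each flash bank carries a signature byte; the bank whose
          	 * signature matches ICH_NVM_SIG_VALUE is the valid one.
          	 */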
   13841 	switch (sc->sc_type) {
   13842 	case WM_T_PCH_SPT:
   13843 	case WM_T_PCH_CNP:
   13844 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13845 		act_offset = ICH_NVM_SIG_WORD * 2;
   13846 
   13847 		/* Set bank to 0 in case flash read fails. */
   13848 		*bank = 0;
   13849 
   13850 		/* Check bank 0 */
   13851 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13852 		if (rv != 0)
   13853 			return rv;
   13854 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13855 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13856 			*bank = 0;
   13857 			return 0;
   13858 		}
   13859 
   13860 		/* Check bank 1 */
    13861 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13862 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13863 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13864 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13865 			*bank = 1;
   13866 			return 0;
   13867 		}
   13868 		aprint_error_dev(sc->sc_dev,
   13869 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13870 		return -1;
   13871 	case WM_T_ICH8:
   13872 	case WM_T_ICH9:
   13873 		eecd = CSR_READ(sc, WMREG_EECD);
   13874 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13875 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13876 			return 0;
   13877 		}
   13878 		/* FALLTHROUGH */
   13879 	default:
   13880 		/* Default to 0 */
   13881 		*bank = 0;
   13882 
   13883 		/* Check bank 0 */
   13884 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13885 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13886 			*bank = 0;
   13887 			return 0;
   13888 		}
   13889 
   13890 		/* Check bank 1 */
   13891 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13892 		    &sig_byte);
   13893 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13894 			*bank = 1;
   13895 			return 0;
   13896 		}
   13897 	}
   13898 
   13899 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13900 		device_xname(sc->sc_dev)));
   13901 	return -1;
   13902 }
   13903 
   13904 /******************************************************************************
   13905  * This function does initial flash setup so that a new read/write/erase cycle
   13906  * can be started.
   13907  *
   13908  * sc - The pointer to the hw structure
   13909  ****************************************************************************/
   13910 static int32_t
   13911 wm_ich8_cycle_init(struct wm_softc *sc)
   13912 {
   13913 	uint16_t hsfsts;
   13914 	int32_t error = 1;
   13915 	int32_t i     = 0;
   13916 
   13917 	if (sc->sc_type >= WM_T_PCH_SPT)
   13918 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13919 	else
   13920 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13921 
    13922 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13923 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13924 		return error;
   13925 
    13926 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   13928 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13929 
   13930 	if (sc->sc_type >= WM_T_PCH_SPT)
   13931 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13932 	else
   13933 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13934 
   13935 	/*
    13936 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13937 	 * check against before starting a new cycle, or the FDONE bit
    13938 	 * should be changed in the hardware so that it reads 1 after a
    13939 	 * hardware reset, which could then indicate whether a cycle is
    13940 	 * in progress or has completed.  We should also have some
    13941 	 * software semaphore mechanism guarding FDONE or the
    13942 	 * cycle-in-progress bit so that accesses from two threads are
    13943 	 * serialized and two threads can't start a cycle at the same time.
   13944 	 */
   13945 
   13946 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13947 		/*
   13948 		 * There is no cycle running at present, so we can start a
   13949 		 * cycle
   13950 		 */
   13951 
   13952 		/* Begin by setting Flash Cycle Done. */
   13953 		hsfsts |= HSFSTS_DONE;
   13954 		if (sc->sc_type >= WM_T_PCH_SPT)
   13955 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13956 			    hsfsts & 0xffffUL);
   13957 		else
   13958 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13959 		error = 0;
   13960 	} else {
   13961 		/*
    13962 		 * Otherwise, poll for some time so the current cycle has a
   13963 		 * chance to end before giving up.
   13964 		 */
   13965 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13966 			if (sc->sc_type >= WM_T_PCH_SPT)
   13967 				hsfsts = ICH8_FLASH_READ32(sc,
   13968 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13969 			else
   13970 				hsfsts = ICH8_FLASH_READ16(sc,
   13971 				    ICH_FLASH_HSFSTS);
   13972 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13973 				error = 0;
   13974 				break;
   13975 			}
   13976 			delay(1);
   13977 		}
   13978 		if (error == 0) {
   13979 			/*
    13980 			 * The previous cycle completed before our timeout
    13981 			 * expired; now set the Flash Cycle Done bit.
   13982 			 */
   13983 			hsfsts |= HSFSTS_DONE;
   13984 			if (sc->sc_type >= WM_T_PCH_SPT)
   13985 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13986 				    hsfsts & 0xffffUL);
   13987 			else
   13988 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13989 				    hsfsts);
   13990 		}
   13991 	}
   13992 	return error;
   13993 }
   13994 
   13995 /******************************************************************************
   13996  * This function starts a flash cycle and waits for its completion
   13997  *
   13998  * sc - The pointer to the hw structure
   13999  ****************************************************************************/
   14000 static int32_t
   14001 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14002 {
   14003 	uint16_t hsflctl;
   14004 	uint16_t hsfsts;
   14005 	int32_t error = 1;
   14006 	uint32_t i = 0;
   14007 
   14008 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
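          	/*
          	 * On PCH_SPT and newer the flash registers are mapped for
          	 * 32-bit access only, with HSFCTL in the upper half of the
          	 * HSFSTS dword; hence the shifted 32-bit accesses below.
          	 */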
   14009 	if (sc->sc_type >= WM_T_PCH_SPT)
   14010 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14011 	else
   14012 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14013 	hsflctl |= HSFCTL_GO;
   14014 	if (sc->sc_type >= WM_T_PCH_SPT)
   14015 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14016 		    (uint32_t)hsflctl << 16);
   14017 	else
   14018 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14019 
   14020 	/* Wait till FDONE bit is set to 1 */
   14021 	do {
   14022 		if (sc->sc_type >= WM_T_PCH_SPT)
   14023 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14024 			    & 0xffffUL;
   14025 		else
   14026 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14027 		if (hsfsts & HSFSTS_DONE)
   14028 			break;
   14029 		delay(1);
   14030 		i++;
   14031 	} while (i < timeout);
    14032 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14033 		error = 0;
   14034 
   14035 	return error;
   14036 }
   14037 
   14038 /******************************************************************************
   14039  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14040  *
   14041  * sc - The pointer to the hw structure
   14042  * index - The index of the byte or word to read.
   14043  * size - Size of data to read, 1=byte 2=word, 4=dword
   14044  * data - Pointer to the word to store the value read.
   14045  *****************************************************************************/
   14046 static int32_t
   14047 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14048     uint32_t size, uint32_t *data)
   14049 {
   14050 	uint16_t hsfsts;
   14051 	uint16_t hsflctl;
   14052 	uint32_t flash_linear_address;
   14053 	uint32_t flash_data = 0;
   14054 	int32_t error = 1;
   14055 	int32_t count = 0;
   14056 
   14057 	if (size < 1  || size > 4 || data == 0x0 ||
   14058 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14059 		return error;
   14060 
   14061 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14062 	    sc->sc_ich8_flash_base;
   14063 
   14064 	do {
   14065 		delay(1);
   14066 		/* Steps */
   14067 		error = wm_ich8_cycle_init(sc);
   14068 		if (error)
   14069 			break;
   14070 
   14071 		if (sc->sc_type >= WM_T_PCH_SPT)
   14072 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14073 			    >> 16;
   14074 		else
   14075 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14076 		/* BCOUNT = size - 1: 0/1/3 selects 1-, 2- or 4-byte access. */
   14077 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14078 		    & HSFCTL_BCOUNT_MASK;
   14079 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14080 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14081 			/*
    14082 			 * In SPT, this register is in LAN memory space, not
    14083 			 * flash. Therefore, only 32-bit access is supported.
   14084 			 */
   14085 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14086 			    (uint32_t)hsflctl << 16);
   14087 		} else
   14088 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14089 
   14090 		/*
   14091 		 * Write the last 24 bits of index into Flash Linear address
   14092 		 * field in Flash Address
   14093 		 */
   14094 		/* TODO: TBD maybe check the index against the size of flash */
   14095 
   14096 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14097 
   14098 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14099 
   14100 		/*
    14101 		 * If FCERR is set, clear it and retry the whole sequence
    14102 		 * a few more times; otherwise read in (shift in) the
    14103 		 * Flash Data0 register, least significant byte first.
   14105 		 */
   14106 		if (error == 0) {
   14107 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14108 			if (size == 1)
   14109 				*data = (uint8_t)(flash_data & 0x000000FF);
   14110 			else if (size == 2)
   14111 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14112 			else if (size == 4)
   14113 				*data = (uint32_t)flash_data;
   14114 			break;
   14115 		} else {
   14116 			/*
   14117 			 * If we've gotten here, then things are probably
   14118 			 * completely hosed, but if the error condition is
   14119 			 * detected, it won't hurt to give it another try...
   14120 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14121 			 */
   14122 			if (sc->sc_type >= WM_T_PCH_SPT)
   14123 				hsfsts = ICH8_FLASH_READ32(sc,
   14124 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14125 			else
   14126 				hsfsts = ICH8_FLASH_READ16(sc,
   14127 				    ICH_FLASH_HSFSTS);
   14128 
   14129 			if (hsfsts & HSFSTS_ERR) {
   14130 				/* Repeat for some time before giving up. */
   14131 				continue;
   14132 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14133 				break;
   14134 		}
   14135 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14136 
   14137 	return error;
   14138 }
   14139 
   14140 /******************************************************************************
   14141  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14142  *
   14143  * sc - pointer to wm_hw structure
   14144  * index - The index of the byte to read.
   14145  * data - Pointer to a byte to store the value read.
   14146  *****************************************************************************/
   14147 static int32_t
   14148 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14149 {
   14150 	int32_t status;
   14151 	uint32_t word = 0;
   14152 
   14153 	status = wm_read_ich8_data(sc, index, 1, &word);
   14154 	if (status == 0)
   14155 		*data = (uint8_t)word;
   14156 	else
   14157 		*data = 0;
   14158 
   14159 	return status;
   14160 }
   14161 
   14162 /******************************************************************************
   14163  * Reads a word from the NVM using the ICH8 flash access registers.
   14164  *
   14165  * sc - pointer to wm_hw structure
   14166  * index - The starting byte index of the word to read.
   14167  * data - Pointer to a word to store the value read.
   14168  *****************************************************************************/
   14169 static int32_t
   14170 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14171 {
   14172 	int32_t status;
   14173 	uint32_t word = 0;
   14174 
   14175 	status = wm_read_ich8_data(sc, index, 2, &word);
   14176 	if (status == 0)
   14177 		*data = (uint16_t)word;
   14178 	else
   14179 		*data = 0;
   14180 
   14181 	return status;
   14182 }
   14183 
   14184 /******************************************************************************
   14185  * Reads a dword from the NVM using the ICH8 flash access registers.
   14186  *
   14187  * sc - pointer to wm_hw structure
   14188  * index - The starting byte index of the word to read.
   14189  * data - Pointer to a word to store the value read.
   14190  *****************************************************************************/
   14191 static int32_t
   14192 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14193 {
   14194 	int32_t status;
   14195 
   14196 	status = wm_read_ich8_data(sc, index, 4, data);
   14197 	return status;
   14198 }
   14199 
   14200 /******************************************************************************
   14201  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14202  * register.
   14203  *
   14204  * sc - Struct containing variables accessed by shared code
   14205  * offset - offset of word in the EEPROM to read
   14206  * data - word read from the EEPROM
   14207  * words - number of words to read
   14208  *****************************************************************************/
   14209 static int
   14210 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14211 {
   14212 	int32_t	 rv = 0;
   14213 	uint32_t flash_bank = 0;
   14214 	uint32_t act_offset = 0;
   14215 	uint32_t bank_offset = 0;
   14216 	uint16_t word = 0;
   14217 	uint16_t i = 0;
   14218 
   14219 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14220 		device_xname(sc->sc_dev), __func__));
   14221 
   14222 	if (sc->nvm.acquire(sc) != 0)
   14223 		return -1;
   14224 
   14225 	/*
    14226 	 * We need to know which flash bank is valid.  In the event
    14227 	 * that we didn't allocate eeprom_shadow_ram, we may not be
    14228 	 * managing flash_bank, so it cannot be trusted and needs
    14229 	 * to be updated with each read.
   14230 	 */
   14231 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14232 	if (rv) {
   14233 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14234 			device_xname(sc->sc_dev)));
   14235 		flash_bank = 0;
   14236 	}
   14237 
   14238 	/*
    14239 	 * Adjust the offset appropriately if we're on bank 1 - the bank
    14240 	 * size is in words, so multiply by two for bytes.
   14241 	 */
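	/*
	 * Illustrative example (bank size assumed): with a 1024-word
	 * flash bank, bank 1 starts 1024 * 2 = 2048 bytes into the
	 * flash, so word 0x10 is read from byte offset 2048 + 0x20.
	 */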
   14242 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14243 
   14244 	for (i = 0; i < words; i++) {
   14245 		/* The NVM part needs a byte offset, hence * 2 */
   14246 		act_offset = bank_offset + ((offset + i) * 2);
   14247 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14248 		if (rv) {
   14249 			aprint_error_dev(sc->sc_dev,
   14250 			    "%s: failed to read NVM\n", __func__);
   14251 			break;
   14252 		}
   14253 		data[i] = word;
   14254 	}
   14255 
   14256 	sc->nvm.release(sc);
   14257 	return rv;
   14258 }
   14259 
   14260 /******************************************************************************
   14261  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14262  * register.
   14263  *
   14264  * sc - Struct containing variables accessed by shared code
   14265  * offset - offset of word in the EEPROM to read
   14266  * data - word read from the EEPROM
   14267  * words - number of words to read
   14268  *****************************************************************************/
   14269 static int
   14270 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14271 {
   14272 	int32_t	 rv = 0;
   14273 	uint32_t flash_bank = 0;
   14274 	uint32_t act_offset = 0;
   14275 	uint32_t bank_offset = 0;
   14276 	uint32_t dword = 0;
   14277 	uint16_t i = 0;
   14278 
   14279 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14280 		device_xname(sc->sc_dev), __func__));
   14281 
   14282 	if (sc->nvm.acquire(sc) != 0)
   14283 		return -1;
   14284 
   14285 	/*
    14286 	 * We need to know which flash bank is valid.  In the event
    14287 	 * that we didn't allocate eeprom_shadow_ram, we may not be
    14288 	 * managing flash_bank, so it cannot be trusted and needs
    14289 	 * to be updated with each read.
   14290 	 */
   14291 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14292 	if (rv) {
   14293 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14294 			device_xname(sc->sc_dev)));
   14295 		flash_bank = 0;
   14296 	}
   14297 
   14298 	/*
    14299 	 * Adjust the offset appropriately if we're on bank 1 - the bank
    14300 	 * size is in words, so multiply by two for bytes.
   14301 	 */
   14302 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14303 
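	/*
	 * On PCH_SPT and newer the flash can only be read 32 bits at a
	 * time, so each 16-bit word is fetched with a dword-aligned
	 * read and the relevant half is picked out below.
	 */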
   14304 	for (i = 0; i < words; i++) {
   14305 		/* The NVM part needs a byte offset, hence * 2 */
   14306 		act_offset = bank_offset + ((offset + i) * 2);
   14307 		/* but we must read dword aligned, so mask ... */
   14308 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14309 		if (rv) {
   14310 			aprint_error_dev(sc->sc_dev,
   14311 			    "%s: failed to read NVM\n", __func__);
   14312 			break;
   14313 		}
   14314 		/* ... and pick out low or high word */
   14315 		if ((act_offset & 0x2) == 0)
   14316 			data[i] = (uint16_t)(dword & 0xFFFF);
   14317 		else
   14318 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14319 	}
   14320 
   14321 	sc->nvm.release(sc);
   14322 	return rv;
   14323 }
   14324 
   14325 /* iNVM */
   14326 
   14327 static int
   14328 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14329 {
    14330 	int32_t	 rv = -1;	/* "not found" unless we match below */
   14331 	uint32_t invm_dword;
   14332 	uint16_t i;
   14333 	uint8_t record_type, word_address;
   14334 
   14335 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14336 		device_xname(sc->sc_dev), __func__));
   14337 
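	/*
	 * The iNVM is an array of 32-bit records.  Walk it, skipping
	 * the payloads of CSR-autoload and RSA-key records, until a
	 * word autoload record matching the requested address is found.
	 */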
   14338 	for (i = 0; i < INVM_SIZE; i++) {
   14339 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14340 		/* Get record type */
   14341 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14342 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14343 			break;
   14344 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14345 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14346 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14347 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14348 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14349 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14350 			if (word_address == address) {
   14351 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14352 				rv = 0;
   14353 				break;
   14354 			}
   14355 		}
   14356 	}
   14357 
   14358 	return rv;
   14359 }
   14360 
   14361 static int
   14362 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14363 {
   14364 	int rv = 0;
   14365 	int i;
   14366 
   14367 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14368 		device_xname(sc->sc_dev), __func__));
   14369 
   14370 	if (sc->nvm.acquire(sc) != 0)
   14371 		return -1;
   14372 
   14373 	for (i = 0; i < words; i++) {
   14374 		switch (offset + i) {
   14375 		case NVM_OFF_MACADDR:
   14376 		case NVM_OFF_MACADDR1:
   14377 		case NVM_OFF_MACADDR2:
   14378 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14379 			if (rv != 0) {
   14380 				data[i] = 0xffff;
   14381 				rv = -1;
   14382 			}
   14383 			break;
   14384 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14385 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14386 			if (rv != 0) {
   14387 				*data = INVM_DEFAULT_AL;
   14388 				rv = 0;
   14389 			}
   14390 			break;
   14391 		case NVM_OFF_CFG2:
   14392 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14393 			if (rv != 0) {
   14394 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14395 				rv = 0;
   14396 			}
   14397 			break;
   14398 		case NVM_OFF_CFG4:
   14399 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14400 			if (rv != 0) {
   14401 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14402 				rv = 0;
   14403 			}
   14404 			break;
   14405 		case NVM_OFF_LED_1_CFG:
   14406 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14407 			if (rv != 0) {
   14408 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14409 				rv = 0;
   14410 			}
   14411 			break;
   14412 		case NVM_OFF_LED_0_2_CFG:
   14413 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14414 			if (rv != 0) {
   14415 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14416 				rv = 0;
   14417 			}
   14418 			break;
   14419 		case NVM_OFF_ID_LED_SETTINGS:
   14420 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14421 			if (rv != 0) {
   14422 				*data = ID_LED_RESERVED_FFFF;
   14423 				rv = 0;
   14424 			}
   14425 			break;
   14426 		default:
   14427 			DPRINTF(sc, WM_DEBUG_NVM,
   14428 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14429 			*data = NVM_RESERVED_WORD;
   14430 			break;
   14431 		}
   14432 	}
   14433 
   14434 	sc->nvm.release(sc);
   14435 	return rv;
   14436 }
   14437 
   14438 /* Lock, detecting NVM type, validate checksum, version and read */
   14439 
   14440 static int
   14441 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14442 {
   14443 	uint32_t eecd = 0;
   14444 
   14445 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14446 	    || sc->sc_type == WM_T_82583) {
   14447 		eecd = CSR_READ(sc, WMREG_EECD);
   14448 
   14449 		/* Isolate bits 15 & 16 */
   14450 		eecd = ((eecd >> 15) & 0x03);
   14451 
   14452 		/* If both bits are set, device is Flash type */
   14453 		if (eecd == 0x03)
   14454 			return 0;
   14455 	}
   14456 	return 1;
   14457 }
   14458 
   14459 static int
   14460 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14461 {
   14462 	uint32_t eec;
   14463 
   14464 	eec = CSR_READ(sc, WMREG_EEC);
   14465 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14466 		return 1;
   14467 
   14468 	return 0;
   14469 }
   14470 
   14471 /*
   14472  * wm_nvm_validate_checksum
   14473  *
   14474  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14475  */
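/*
 * Illustrative sketch (not driver code): an image is considered valid
 * when its first NVM_SIZE 16-bit words sum to NVM_CHECKSUM, i.e. the
 * checksum word is chosen such that
 *
 *	uint16_t sum = 0;
 *	for (i = 0; i < NVM_SIZE; i++)
 *		sum += word[i];
 *
 * leaves sum == NVM_CHECKSUM.
 */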
   14476 static int
   14477 wm_nvm_validate_checksum(struct wm_softc *sc)
   14478 {
   14479 	uint16_t checksum;
   14480 	uint16_t eeprom_data;
   14481 #ifdef WM_DEBUG
   14482 	uint16_t csum_wordaddr, valid_checksum;
   14483 #endif
   14484 	int i;
   14485 
   14486 	checksum = 0;
   14487 
   14488 	/* Don't check for I211 */
   14489 	if (sc->sc_type == WM_T_I211)
   14490 		return 0;
   14491 
   14492 #ifdef WM_DEBUG
   14493 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14494 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14495 		csum_wordaddr = NVM_OFF_COMPAT;
   14496 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14497 	} else {
   14498 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14499 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14500 	}
   14501 
   14502 	/* Dump EEPROM image for debug */
   14503 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14504 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14505 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14506 		/* XXX PCH_SPT? */
   14507 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14508 		if ((eeprom_data & valid_checksum) == 0)
   14509 			DPRINTF(sc, WM_DEBUG_NVM,
   14510 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14511 				device_xname(sc->sc_dev), eeprom_data,
   14512 				    valid_checksum));
   14513 	}
   14514 
   14515 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14516 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14517 		for (i = 0; i < NVM_SIZE; i++) {
   14518 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14519 				printf("XXXX ");
   14520 			else
   14521 				printf("%04hx ", eeprom_data);
   14522 			if (i % 8 == 7)
   14523 				printf("\n");
   14524 		}
   14525 	}
   14526 
   14527 #endif /* WM_DEBUG */
   14528 
   14529 	for (i = 0; i < NVM_SIZE; i++) {
   14530 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14531 			return 1;
   14532 		checksum += eeprom_data;
   14533 	}
   14534 
   14535 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14536 #ifdef WM_DEBUG
   14537 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14538 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14539 #endif
   14540 	}
   14541 
   14542 	return 0;
   14543 }
   14544 
   14545 static void
   14546 wm_nvm_version_invm(struct wm_softc *sc)
   14547 {
   14548 	uint32_t dword;
   14549 
   14550 	/*
    14551 	 * Linux's code to decode the version is very strange, so we don't
    14552 	 * follow that algorithm and just use word 61 as the documentation
    14553 	 * describes.  Perhaps it's not perfect though...
   14554 	 *
   14555 	 * Example:
   14556 	 *
   14557 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14558 	 */
   14559 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14560 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14561 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14562 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14563 }
   14564 
   14565 static void
   14566 wm_nvm_version(struct wm_softc *sc)
   14567 {
   14568 	uint16_t major, minor, build, patch;
   14569 	uint16_t uid0, uid1;
   14570 	uint16_t nvm_data;
   14571 	uint16_t off;
   14572 	bool check_version = false;
   14573 	bool check_optionrom = false;
   14574 	bool have_build = false;
   14575 	bool have_uid = true;
   14576 
   14577 	/*
   14578 	 * Version format:
   14579 	 *
   14580 	 * XYYZ
   14581 	 * X0YZ
   14582 	 * X0YY
   14583 	 *
   14584 	 * Example:
   14585 	 *
   14586 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14587 	 *	82571	0x50a6	5.10.6?
   14588 	 *	82572	0x506a	5.6.10?
   14589 	 *	82572EI	0x5069	5.6.9?
   14590 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14591 	 *		0x2013	2.1.3?
   14592 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14593 	 * ICH8+82567	0x0040	0.4.0?
   14594 	 * ICH9+82566	0x1040	1.4.0?
   14595 	 *ICH10+82567	0x0043	0.4.3?
   14596 	 *  PCH+82577	0x00c1	0.12.1?
   14597 	 * PCH2+82579	0x00d3	0.13.3?
   14598 	 *		0x00d4	0.13.4?
   14599 	 *  LPT+I218	0x0023	0.2.3?
   14600 	 *  SPT+I219	0x0084	0.8.4?
   14601 	 *  CNP+I219	0x0054	0.5.4?
   14602 	 */
   14603 
   14604 	/*
   14605 	 * XXX
   14606 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    14607 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14608 	 */
   14609 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14610 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14611 		have_uid = false;
   14612 
   14613 	switch (sc->sc_type) {
   14614 	case WM_T_82571:
   14615 	case WM_T_82572:
   14616 	case WM_T_82574:
   14617 	case WM_T_82583:
   14618 		check_version = true;
   14619 		check_optionrom = true;
   14620 		have_build = true;
   14621 		break;
   14622 	case WM_T_ICH8:
   14623 	case WM_T_ICH9:
   14624 	case WM_T_ICH10:
   14625 	case WM_T_PCH:
   14626 	case WM_T_PCH2:
   14627 	case WM_T_PCH_LPT:
   14628 	case WM_T_PCH_SPT:
   14629 	case WM_T_PCH_CNP:
   14630 		check_version = true;
   14631 		have_build = true;
   14632 		have_uid = false;
   14633 		break;
   14634 	case WM_T_82575:
   14635 	case WM_T_82576:
   14636 	case WM_T_82580:
   14637 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14638 			check_version = true;
   14639 		break;
   14640 	case WM_T_I211:
   14641 		wm_nvm_version_invm(sc);
   14642 		have_uid = false;
   14643 		goto printver;
   14644 	case WM_T_I210:
   14645 		if (!wm_nvm_flash_presence_i210(sc)) {
   14646 			wm_nvm_version_invm(sc);
   14647 			have_uid = false;
   14648 			goto printver;
   14649 		}
   14650 		/* FALLTHROUGH */
   14651 	case WM_T_I350:
   14652 	case WM_T_I354:
   14653 		check_version = true;
   14654 		check_optionrom = true;
   14655 		break;
   14656 	default:
   14657 		return;
   14658 	}
   14659 	if (check_version
   14660 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14661 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14662 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14663 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14664 			build = nvm_data & NVM_BUILD_MASK;
   14665 			have_build = true;
   14666 		} else
   14667 			minor = nvm_data & 0x00ff;
   14668 
   14669 		/* Decimal */
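		/*
		 * e.g. 82571's 0x50a2 has minor nibble 0x0a, which
		 * becomes 10 and prints as the "5.10.2" in the table
		 * above.
		 */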
   14670 		minor = (minor / 16) * 10 + (minor % 16);
   14671 		sc->sc_nvm_ver_major = major;
   14672 		sc->sc_nvm_ver_minor = minor;
   14673 
   14674 printver:
   14675 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14676 		    sc->sc_nvm_ver_minor);
   14677 		if (have_build) {
   14678 			sc->sc_nvm_ver_build = build;
   14679 			aprint_verbose(".%d", build);
   14680 		}
   14681 	}
   14682 
    14683 	/* Assume the Option ROM area is above NVM_SIZE */
   14684 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14685 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14686 		/* Option ROM Version */
   14687 		if ((off != 0x0000) && (off != 0xffff)) {
   14688 			int rv;
   14689 
   14690 			off += NVM_COMBO_VER_OFF;
   14691 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14692 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14693 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14694 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14695 				/* 16bits */
   14696 				major = uid0 >> 8;
   14697 				build = (uid0 << 8) | (uid1 >> 8);
   14698 				patch = uid1 & 0x00ff;
   14699 				aprint_verbose(", option ROM Version %d.%d.%d",
   14700 				    major, build, patch);
   14701 			}
   14702 		}
   14703 	}
   14704 
   14705 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14706 		aprint_verbose(", Image Unique ID %08x",
   14707 		    ((uint32_t)uid1 << 16) | uid0);
   14708 }
   14709 
   14710 /*
   14711  * wm_nvm_read:
   14712  *
   14713  *	Read data from the serial EEPROM.
   14714  */
   14715 static int
   14716 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14717 {
   14718 	int rv;
   14719 
   14720 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14721 		device_xname(sc->sc_dev), __func__));
   14722 
   14723 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14724 		return -1;
   14725 
   14726 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14727 
   14728 	return rv;
   14729 }
   14730 
   14731 /*
   14732  * Hardware semaphores.
    14733  * Very complex...
   14734  */
   14735 
   14736 static int
   14737 wm_get_null(struct wm_softc *sc)
   14738 {
   14739 
   14740 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14741 		device_xname(sc->sc_dev), __func__));
   14742 	return 0;
   14743 }
   14744 
   14745 static void
   14746 wm_put_null(struct wm_softc *sc)
   14747 {
   14748 
   14749 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14750 		device_xname(sc->sc_dev), __func__));
   14751 	return;
   14752 }
   14753 
   14754 static int
   14755 wm_get_eecd(struct wm_softc *sc)
   14756 {
   14757 	uint32_t reg;
   14758 	int x;
   14759 
   14760 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14761 		device_xname(sc->sc_dev), __func__));
   14762 
   14763 	reg = CSR_READ(sc, WMREG_EECD);
   14764 
   14765 	/* Request EEPROM access. */
   14766 	reg |= EECD_EE_REQ;
   14767 	CSR_WRITE(sc, WMREG_EECD, reg);
   14768 
    14769 	/* ...and wait for it to be granted. */
   14770 	for (x = 0; x < 1000; x++) {
   14771 		reg = CSR_READ(sc, WMREG_EECD);
   14772 		if (reg & EECD_EE_GNT)
   14773 			break;
   14774 		delay(5);
   14775 	}
   14776 	if ((reg & EECD_EE_GNT) == 0) {
   14777 		aprint_error_dev(sc->sc_dev,
   14778 		    "could not acquire EEPROM GNT\n");
   14779 		reg &= ~EECD_EE_REQ;
   14780 		CSR_WRITE(sc, WMREG_EECD, reg);
   14781 		return -1;
   14782 	}
   14783 
   14784 	return 0;
   14785 }
   14786 
   14787 static void
   14788 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14789 {
   14790 
   14791 	*eecd |= EECD_SK;
   14792 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14793 	CSR_WRITE_FLUSH(sc);
   14794 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14795 		delay(1);
   14796 	else
   14797 		delay(50);
   14798 }
   14799 
   14800 static void
   14801 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14802 {
   14803 
   14804 	*eecd &= ~EECD_SK;
   14805 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14806 	CSR_WRITE_FLUSH(sc);
   14807 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14808 		delay(1);
   14809 	else
   14810 		delay(50);
   14811 }
   14812 
   14813 static void
   14814 wm_put_eecd(struct wm_softc *sc)
   14815 {
   14816 	uint32_t reg;
   14817 
   14818 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14819 		device_xname(sc->sc_dev), __func__));
   14820 
   14821 	/* Stop nvm */
   14822 	reg = CSR_READ(sc, WMREG_EECD);
   14823 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14824 		/* Pull CS high */
   14825 		reg |= EECD_CS;
   14826 		wm_nvm_eec_clock_lower(sc, &reg);
   14827 	} else {
   14828 		/* CS on Microwire is active-high */
   14829 		reg &= ~(EECD_CS | EECD_DI);
   14830 		CSR_WRITE(sc, WMREG_EECD, reg);
   14831 		wm_nvm_eec_clock_raise(sc, &reg);
   14832 		wm_nvm_eec_clock_lower(sc, &reg);
   14833 	}
   14834 
   14835 	reg = CSR_READ(sc, WMREG_EECD);
   14836 	reg &= ~EECD_EE_REQ;
   14837 	CSR_WRITE(sc, WMREG_EECD, reg);
   14838 
   14839 	return;
   14840 }
   14841 
   14842 /*
   14843  * Get hardware semaphore.
   14844  * Same as e1000_get_hw_semaphore_generic()
   14845  */
   14846 static int
   14847 wm_get_swsm_semaphore(struct wm_softc *sc)
   14848 {
   14849 	int32_t timeout;
   14850 	uint32_t swsm;
   14851 
   14852 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14853 		device_xname(sc->sc_dev), __func__));
   14854 	KASSERT(sc->sc_nvm_wordsize > 0);
   14855 
   14856 retry:
   14857 	/* Get the SW semaphore. */
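	/*
	 * The hardware sets SMBI as a side effect of reading SWSM; if
	 * the value we read back has SMBI clear, that read has just
	 * claimed the semaphore for us.
	 */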
   14858 	timeout = sc->sc_nvm_wordsize + 1;
   14859 	while (timeout) {
   14860 		swsm = CSR_READ(sc, WMREG_SWSM);
   14861 
   14862 		if ((swsm & SWSM_SMBI) == 0)
   14863 			break;
   14864 
   14865 		delay(50);
   14866 		timeout--;
   14867 	}
   14868 
   14869 	if (timeout == 0) {
   14870 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14871 			/*
   14872 			 * In rare circumstances, the SW semaphore may already
   14873 			 * be held unintentionally. Clear the semaphore once
   14874 			 * before giving up.
   14875 			 */
   14876 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14877 			wm_put_swsm_semaphore(sc);
   14878 			goto retry;
   14879 		}
   14880 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14881 		return 1;
   14882 	}
   14883 
   14884 	/* Get the FW semaphore. */
   14885 	timeout = sc->sc_nvm_wordsize + 1;
   14886 	while (timeout) {
   14887 		swsm = CSR_READ(sc, WMREG_SWSM);
   14888 		swsm |= SWSM_SWESMBI;
   14889 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14890 		/* If we managed to set the bit we got the semaphore. */
   14891 		swsm = CSR_READ(sc, WMREG_SWSM);
   14892 		if (swsm & SWSM_SWESMBI)
   14893 			break;
   14894 
   14895 		delay(50);
   14896 		timeout--;
   14897 	}
   14898 
   14899 	if (timeout == 0) {
   14900 		aprint_error_dev(sc->sc_dev,
   14901 		    "could not acquire SWSM SWESMBI\n");
   14902 		/* Release semaphores */
   14903 		wm_put_swsm_semaphore(sc);
   14904 		return 1;
   14905 	}
   14906 	return 0;
   14907 }
   14908 
   14909 /*
   14910  * Put hardware semaphore.
   14911  * Same as e1000_put_hw_semaphore_generic()
   14912  */
   14913 static void
   14914 wm_put_swsm_semaphore(struct wm_softc *sc)
   14915 {
   14916 	uint32_t swsm;
   14917 
   14918 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14919 		device_xname(sc->sc_dev), __func__));
   14920 
   14921 	swsm = CSR_READ(sc, WMREG_SWSM);
   14922 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14923 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14924 }
   14925 
   14926 /*
   14927  * Get SW/FW semaphore.
   14928  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14929  */
   14930 static int
   14931 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14932 {
   14933 	uint32_t swfw_sync;
   14934 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14935 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14936 	int timeout;
   14937 
   14938 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14939 		device_xname(sc->sc_dev), __func__));
   14940 
   14941 	if (sc->sc_type == WM_T_80003)
   14942 		timeout = 50;
   14943 	else
   14944 		timeout = 200;
   14945 
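	/*
	 * SW_FW_SYNC is itself protected by the SWSM semaphore: take
	 * SWSM, check that neither software nor firmware owns the
	 * resource, claim the software bit, then drop SWSM again.
	 */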
   14946 	while (timeout) {
   14947 		if (wm_get_swsm_semaphore(sc)) {
   14948 			aprint_error_dev(sc->sc_dev,
   14949 			    "%s: failed to get semaphore\n",
   14950 			    __func__);
   14951 			return 1;
   14952 		}
   14953 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14954 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14955 			swfw_sync |= swmask;
   14956 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14957 			wm_put_swsm_semaphore(sc);
   14958 			return 0;
   14959 		}
   14960 		wm_put_swsm_semaphore(sc);
   14961 		delay(5000);
   14962 		timeout--;
   14963 	}
   14964 	device_printf(sc->sc_dev,
   14965 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14966 	    mask, swfw_sync);
   14967 	return 1;
   14968 }
   14969 
   14970 static void
   14971 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14972 {
   14973 	uint32_t swfw_sync;
   14974 
   14975 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14976 		device_xname(sc->sc_dev), __func__));
   14977 
   14978 	while (wm_get_swsm_semaphore(sc) != 0)
   14979 		continue;
   14980 
   14981 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14982 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14983 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14984 
   14985 	wm_put_swsm_semaphore(sc);
   14986 }
   14987 
   14988 static int
   14989 wm_get_nvm_80003(struct wm_softc *sc)
   14990 {
   14991 	int rv;
   14992 
   14993 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14994 		device_xname(sc->sc_dev), __func__));
   14995 
   14996 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14997 		aprint_error_dev(sc->sc_dev,
   14998 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14999 		return rv;
   15000 	}
   15001 
   15002 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15003 	    && (rv = wm_get_eecd(sc)) != 0) {
   15004 		aprint_error_dev(sc->sc_dev,
   15005 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15006 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15007 		return rv;
   15008 	}
   15009 
   15010 	return 0;
   15011 }
   15012 
   15013 static void
   15014 wm_put_nvm_80003(struct wm_softc *sc)
   15015 {
   15016 
   15017 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15018 		device_xname(sc->sc_dev), __func__));
   15019 
   15020 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15021 		wm_put_eecd(sc);
   15022 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15023 }
   15024 
   15025 static int
   15026 wm_get_nvm_82571(struct wm_softc *sc)
   15027 {
   15028 	int rv;
   15029 
   15030 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15031 		device_xname(sc->sc_dev), __func__));
   15032 
   15033 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15034 		return rv;
   15035 
   15036 	switch (sc->sc_type) {
   15037 	case WM_T_82573:
   15038 		break;
   15039 	default:
   15040 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15041 			rv = wm_get_eecd(sc);
   15042 		break;
   15043 	}
   15044 
   15045 	if (rv != 0) {
   15046 		aprint_error_dev(sc->sc_dev,
   15047 		    "%s: failed to get semaphore\n",
   15048 		    __func__);
   15049 		wm_put_swsm_semaphore(sc);
   15050 	}
   15051 
   15052 	return rv;
   15053 }
   15054 
   15055 static void
   15056 wm_put_nvm_82571(struct wm_softc *sc)
   15057 {
   15058 
   15059 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15060 		device_xname(sc->sc_dev), __func__));
   15061 
   15062 	switch (sc->sc_type) {
   15063 	case WM_T_82573:
   15064 		break;
   15065 	default:
   15066 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15067 			wm_put_eecd(sc);
   15068 		break;
   15069 	}
   15070 
   15071 	wm_put_swsm_semaphore(sc);
   15072 }
   15073 
   15074 static int
   15075 wm_get_phy_82575(struct wm_softc *sc)
   15076 {
   15077 
   15078 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15079 		device_xname(sc->sc_dev), __func__));
   15080 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15081 }
   15082 
   15083 static void
   15084 wm_put_phy_82575(struct wm_softc *sc)
   15085 {
   15086 
   15087 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15088 		device_xname(sc->sc_dev), __func__));
   15089 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15090 }
   15091 
   15092 static int
   15093 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15094 {
   15095 	uint32_t ext_ctrl;
   15096 	int timeout = 200;
   15097 
   15098 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15099 		device_xname(sc->sc_dev), __func__));
   15100 
   15101 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15102 	for (timeout = 0; timeout < 200; timeout++) {
   15103 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15104 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15105 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15106 
   15107 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15108 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15109 			return 0;
   15110 		delay(5000);
   15111 	}
   15112 	device_printf(sc->sc_dev,
   15113 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15114 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15115 	return 1;
   15116 }
   15117 
   15118 static void
   15119 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15120 {
   15121 	uint32_t ext_ctrl;
   15122 
   15123 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15124 		device_xname(sc->sc_dev), __func__));
   15125 
   15126 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15127 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15128 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15129 
   15130 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15131 }
   15132 
   15133 static int
   15134 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15135 {
   15136 	uint32_t ext_ctrl;
   15137 	int timeout;
   15138 
   15139 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15140 		device_xname(sc->sc_dev), __func__));
   15141 	mutex_enter(sc->sc_ich_phymtx);
   15142 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15143 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15144 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15145 			break;
   15146 		delay(1000);
   15147 	}
   15148 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15149 		device_printf(sc->sc_dev,
   15150 		    "SW has already locked the resource\n");
   15151 		goto out;
   15152 	}
   15153 
   15154 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15155 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15156 	for (timeout = 0; timeout < 1000; timeout++) {
   15157 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15158 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15159 			break;
   15160 		delay(1000);
   15161 	}
   15162 	if (timeout >= 1000) {
   15163 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15164 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15165 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15166 		goto out;
   15167 	}
   15168 	return 0;
   15169 
   15170 out:
   15171 	mutex_exit(sc->sc_ich_phymtx);
   15172 	return 1;
   15173 }
   15174 
   15175 static void
   15176 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15177 {
   15178 	uint32_t ext_ctrl;
   15179 
   15180 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15181 		device_xname(sc->sc_dev), __func__));
   15182 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15183 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15184 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15185 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15186 	} else
   15187 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15188 
   15189 	mutex_exit(sc->sc_ich_phymtx);
   15190 }
   15191 
   15192 static int
   15193 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15194 {
   15195 
   15196 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15197 		device_xname(sc->sc_dev), __func__));
   15198 	mutex_enter(sc->sc_ich_nvmmtx);
   15199 
   15200 	return 0;
   15201 }
   15202 
   15203 static void
   15204 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15205 {
   15206 
   15207 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15208 		device_xname(sc->sc_dev), __func__));
   15209 	mutex_exit(sc->sc_ich_nvmmtx);
   15210 }
   15211 
   15212 static int
   15213 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15214 {
   15215 	int i = 0;
   15216 	uint32_t reg;
   15217 
   15218 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15219 		device_xname(sc->sc_dev), __func__));
   15220 
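	/*
	 * Ownership is requested by setting MDIO_SW_OWNERSHIP and is
	 * confirmed by reading the bit back; it reads as zero while
	 * the firmware still owns the PHY.
	 */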
   15221 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15222 	do {
   15223 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15224 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15225 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15226 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15227 			break;
   15228 		delay(2*1000);
   15229 		i++;
   15230 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15231 
   15232 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15233 		wm_put_hw_semaphore_82573(sc);
   15234 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15235 		    device_xname(sc->sc_dev));
   15236 		return -1;
   15237 	}
   15238 
   15239 	return 0;
   15240 }
   15241 
   15242 static void
   15243 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15244 {
   15245 	uint32_t reg;
   15246 
   15247 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15248 		device_xname(sc->sc_dev), __func__));
   15249 
   15250 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15251 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15252 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15253 }
   15254 
   15255 /*
   15256  * Management mode and power management related subroutines.
   15257  * BMC, AMT, suspend/resume and EEE.
   15258  */
   15259 
   15260 #ifdef WM_WOL
   15261 static int
   15262 wm_check_mng_mode(struct wm_softc *sc)
   15263 {
   15264 	int rv;
   15265 
   15266 	switch (sc->sc_type) {
   15267 	case WM_T_ICH8:
   15268 	case WM_T_ICH9:
   15269 	case WM_T_ICH10:
   15270 	case WM_T_PCH:
   15271 	case WM_T_PCH2:
   15272 	case WM_T_PCH_LPT:
   15273 	case WM_T_PCH_SPT:
   15274 	case WM_T_PCH_CNP:
   15275 		rv = wm_check_mng_mode_ich8lan(sc);
   15276 		break;
   15277 	case WM_T_82574:
   15278 	case WM_T_82583:
   15279 		rv = wm_check_mng_mode_82574(sc);
   15280 		break;
   15281 	case WM_T_82571:
   15282 	case WM_T_82572:
   15283 	case WM_T_82573:
   15284 	case WM_T_80003:
   15285 		rv = wm_check_mng_mode_generic(sc);
   15286 		break;
   15287 	default:
    15288 		/* Nothing to do */
   15289 		rv = 0;
   15290 		break;
   15291 	}
   15292 
   15293 	return rv;
   15294 }
   15295 
   15296 static int
   15297 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15298 {
   15299 	uint32_t fwsm;
   15300 
   15301 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15302 
   15303 	if (((fwsm & FWSM_FW_VALID) != 0)
   15304 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15305 		return 1;
   15306 
   15307 	return 0;
   15308 }
   15309 
   15310 static int
   15311 wm_check_mng_mode_82574(struct wm_softc *sc)
   15312 {
   15313 	uint16_t data;
   15314 
   15315 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15316 
   15317 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15318 		return 1;
   15319 
   15320 	return 0;
   15321 }
   15322 
   15323 static int
   15324 wm_check_mng_mode_generic(struct wm_softc *sc)
   15325 {
   15326 	uint32_t fwsm;
   15327 
   15328 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15329 
   15330 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15331 		return 1;
   15332 
   15333 	return 0;
   15334 }
   15335 #endif /* WM_WOL */
   15336 
   15337 static int
   15338 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15339 {
   15340 	uint32_t manc, fwsm, factps;
   15341 
   15342 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15343 		return 0;
   15344 
   15345 	manc = CSR_READ(sc, WMREG_MANC);
   15346 
   15347 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15348 		device_xname(sc->sc_dev), manc));
   15349 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15350 		return 0;
   15351 
   15352 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15353 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15354 		factps = CSR_READ(sc, WMREG_FACTPS);
   15355 		if (((factps & FACTPS_MNGCG) == 0)
   15356 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15357 			return 1;
   15358 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15359 		uint16_t data;
   15360 
   15361 		factps = CSR_READ(sc, WMREG_FACTPS);
   15362 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15363 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15364 			device_xname(sc->sc_dev), factps, data));
   15365 		if (((factps & FACTPS_MNGCG) == 0)
   15366 		    && ((data & NVM_CFG2_MNGM_MASK)
   15367 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15368 			return 1;
   15369 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15370 	    && ((manc & MANC_ASF_EN) == 0))
   15371 		return 1;
   15372 
   15373 	return 0;
   15374 }
   15375 
   15376 static bool
   15377 wm_phy_resetisblocked(struct wm_softc *sc)
   15378 {
   15379 	bool blocked = false;
   15380 	uint32_t reg;
   15381 	int i = 0;
   15382 
   15383 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15384 		device_xname(sc->sc_dev), __func__));
   15385 
   15386 	switch (sc->sc_type) {
   15387 	case WM_T_ICH8:
   15388 	case WM_T_ICH9:
   15389 	case WM_T_ICH10:
   15390 	case WM_T_PCH:
   15391 	case WM_T_PCH2:
   15392 	case WM_T_PCH_LPT:
   15393 	case WM_T_PCH_SPT:
   15394 	case WM_T_PCH_CNP:
   15395 		do {
   15396 			reg = CSR_READ(sc, WMREG_FWSM);
   15397 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15398 				blocked = true;
   15399 				delay(10*1000);
   15400 				continue;
   15401 			}
   15402 			blocked = false;
   15403 		} while (blocked && (i++ < 30));
   15404 		return blocked;
   15405 		break;
   15406 	case WM_T_82571:
   15407 	case WM_T_82572:
   15408 	case WM_T_82573:
   15409 	case WM_T_82574:
   15410 	case WM_T_82583:
   15411 	case WM_T_80003:
   15412 		reg = CSR_READ(sc, WMREG_MANC);
   15413 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15414 			return true;
   15415 		else
   15416 			return false;
   15417 		break;
   15418 	default:
   15419 		/* No problem */
   15420 		break;
   15421 	}
   15422 
   15423 	return false;
   15424 }
   15425 
   15426 static void
   15427 wm_get_hw_control(struct wm_softc *sc)
   15428 {
   15429 	uint32_t reg;
   15430 
   15431 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15432 		device_xname(sc->sc_dev), __func__));
   15433 
   15434 	if (sc->sc_type == WM_T_82573) {
   15435 		reg = CSR_READ(sc, WMREG_SWSM);
   15436 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15437 	} else if (sc->sc_type >= WM_T_82571) {
   15438 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15439 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15440 	}
   15441 }
   15442 
   15443 static void
   15444 wm_release_hw_control(struct wm_softc *sc)
   15445 {
   15446 	uint32_t reg;
   15447 
   15448 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15449 		device_xname(sc->sc_dev), __func__));
   15450 
   15451 	if (sc->sc_type == WM_T_82573) {
   15452 		reg = CSR_READ(sc, WMREG_SWSM);
   15453 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15454 	} else if (sc->sc_type >= WM_T_82571) {
   15455 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15456 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15457 	}
   15458 }
   15459 
   15460 static void
   15461 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15462 {
   15463 	uint32_t reg;
   15464 
   15465 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15466 		device_xname(sc->sc_dev), __func__));
   15467 
   15468 	if (sc->sc_type < WM_T_PCH2)
   15469 		return;
   15470 
   15471 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15472 
   15473 	if (gate)
   15474 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15475 	else
   15476 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15477 
   15478 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15479 }
   15480 
   15481 static int
   15482 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15483 {
   15484 	uint32_t fwsm, reg;
   15485 	int rv = 0;
   15486 
   15487 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15488 		device_xname(sc->sc_dev), __func__));
   15489 
   15490 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15491 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15492 
   15493 	/* Disable ULP */
   15494 	wm_ulp_disable(sc);
   15495 
   15496 	/* Acquire PHY semaphore */
   15497 	rv = sc->phy.acquire(sc);
   15498 	if (rv != 0) {
   15499 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15500 		device_xname(sc->sc_dev), __func__));
   15501 		return -1;
   15502 	}
   15503 
   15504 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15505 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15506 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15507 	 */
   15508 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15509 	switch (sc->sc_type) {
   15510 	case WM_T_PCH_LPT:
   15511 	case WM_T_PCH_SPT:
   15512 	case WM_T_PCH_CNP:
   15513 		if (wm_phy_is_accessible_pchlan(sc))
   15514 			break;
   15515 
   15516 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15517 		 * forcing MAC to SMBus mode first.
   15518 		 */
   15519 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15520 		reg |= CTRL_EXT_FORCE_SMBUS;
   15521 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15522 #if 0
   15523 		/* XXX Isn't this required??? */
   15524 		CSR_WRITE_FLUSH(sc);
   15525 #endif
   15526 		/* Wait 50 milliseconds for MAC to finish any retries
   15527 		 * that it might be trying to perform from previous
   15528 		 * attempts to acknowledge any phy read requests.
   15529 		 */
   15530 		delay(50 * 1000);
   15531 		/* FALLTHROUGH */
   15532 	case WM_T_PCH2:
   15533 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15534 			break;
   15535 		/* FALLTHROUGH */
   15536 	case WM_T_PCH:
   15537 		if (sc->sc_type == WM_T_PCH)
   15538 			if ((fwsm & FWSM_FW_VALID) != 0)
   15539 				break;
   15540 
   15541 		if (wm_phy_resetisblocked(sc) == true) {
   15542 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15543 			break;
   15544 		}
   15545 
   15546 		/* Toggle LANPHYPC Value bit */
   15547 		wm_toggle_lanphypc_pch_lpt(sc);
   15548 
   15549 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15550 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15551 				break;
   15552 
   15553 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15554 			 * so ensure that the MAC is also out of SMBus mode
   15555 			 */
   15556 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15557 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15558 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15559 
   15560 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15561 				break;
   15562 			rv = -1;
   15563 		}
   15564 		break;
   15565 	default:
   15566 		break;
   15567 	}
   15568 
   15569 	/* Release semaphore */
   15570 	sc->phy.release(sc);
   15571 
   15572 	if (rv == 0) {
   15573 		/* Check to see if able to reset PHY.  Print error if not */
   15574 		if (wm_phy_resetisblocked(sc)) {
   15575 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15576 			goto out;
   15577 		}
   15578 
   15579 		/* Reset the PHY before any access to it.  Doing so, ensures
   15580 		 * that the PHY is in a known good state before we read/write
   15581 		 * PHY registers.  The generic reset is sufficient here,
   15582 		 * because we haven't determined the PHY type yet.
   15583 		 */
   15584 		if (wm_reset_phy(sc) != 0)
   15585 			goto out;
   15586 
   15587 		/* On a successful reset, possibly need to wait for the PHY
   15588 		 * to quiesce to an accessible state before returning control
   15589 		 * to the calling function.  If the PHY does not quiesce, then
   15590 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    15591 		 * the PHY is in.
   15592 		 */
   15593 		if (wm_phy_resetisblocked(sc))
   15594 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15595 	}
   15596 
   15597 out:
   15598 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15599 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15600 		delay(10*1000);
   15601 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15602 	}
   15603 
   15604 	return 0;
   15605 }
   15606 
   15607 static void
   15608 wm_init_manageability(struct wm_softc *sc)
   15609 {
   15610 
   15611 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15612 		device_xname(sc->sc_dev), __func__));
   15613 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15614 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15615 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15616 
   15617 		/* Disable hardware interception of ARP */
   15618 		manc &= ~MANC_ARP_EN;
   15619 
   15620 		/* Enable receiving management packets to the host */
   15621 		if (sc->sc_type >= WM_T_82571) {
   15622 			manc |= MANC_EN_MNG2HOST;
   15623 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15624 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15625 		}
   15626 
   15627 		CSR_WRITE(sc, WMREG_MANC, manc);
   15628 	}
   15629 }
   15630 
   15631 static void
   15632 wm_release_manageability(struct wm_softc *sc)
   15633 {
   15634 
   15635 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15636 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15637 
   15638 		manc |= MANC_ARP_EN;
   15639 		if (sc->sc_type >= WM_T_82571)
   15640 			manc &= ~MANC_EN_MNG2HOST;
   15641 
   15642 		CSR_WRITE(sc, WMREG_MANC, manc);
   15643 	}
   15644 }
   15645 
   15646 static void
   15647 wm_get_wakeup(struct wm_softc *sc)
   15648 {
   15649 
   15650 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15651 	switch (sc->sc_type) {
   15652 	case WM_T_82573:
   15653 	case WM_T_82583:
   15654 		sc->sc_flags |= WM_F_HAS_AMT;
   15655 		/* FALLTHROUGH */
   15656 	case WM_T_80003:
   15657 	case WM_T_82575:
   15658 	case WM_T_82576:
   15659 	case WM_T_82580:
   15660 	case WM_T_I350:
   15661 	case WM_T_I354:
   15662 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15663 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15664 		/* FALLTHROUGH */
   15665 	case WM_T_82541:
   15666 	case WM_T_82541_2:
   15667 	case WM_T_82547:
   15668 	case WM_T_82547_2:
   15669 	case WM_T_82571:
   15670 	case WM_T_82572:
   15671 	case WM_T_82574:
   15672 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15673 		break;
   15674 	case WM_T_ICH8:
   15675 	case WM_T_ICH9:
   15676 	case WM_T_ICH10:
   15677 	case WM_T_PCH:
   15678 	case WM_T_PCH2:
   15679 	case WM_T_PCH_LPT:
   15680 	case WM_T_PCH_SPT:
   15681 	case WM_T_PCH_CNP:
   15682 		sc->sc_flags |= WM_F_HAS_AMT;
   15683 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15684 		break;
   15685 	default:
   15686 		break;
   15687 	}
   15688 
   15689 	/* 1: HAS_MANAGE */
   15690 	if (wm_enable_mng_pass_thru(sc) != 0)
   15691 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15692 
   15693 	/*
    15694 	 * Note that the WOL flags are set later, after the EEPROM-related
    15695 	 * reset has been done.
   15696 	 */
   15697 }
   15698 
   15699 /*
   15700  * Unconfigure Ultra Low Power mode.
   15701  * Only for I217 and newer (see below).
   15702  */
   15703 static int
   15704 wm_ulp_disable(struct wm_softc *sc)
   15705 {
   15706 	uint32_t reg;
   15707 	uint16_t phyreg;
   15708 	int i = 0, rv = 0;
   15709 
   15710 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15711 		device_xname(sc->sc_dev), __func__));
   15712 	/* Exclude old devices */
   15713 	if ((sc->sc_type < WM_T_PCH_LPT)
   15714 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15715 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15716 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15717 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15718 		return 0;
   15719 
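	/*
	 * If the management engine's firmware is valid, ask the ME to
	 * undo ULP; otherwise the host has to unwind it by hand via
	 * the PHY registers below.
	 */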
   15720 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15721 		/* Request ME un-configure ULP mode in the PHY */
   15722 		reg = CSR_READ(sc, WMREG_H2ME);
   15723 		reg &= ~H2ME_ULP;
   15724 		reg |= H2ME_ENFORCE_SETTINGS;
   15725 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15726 
   15727 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15728 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15729 			if (i++ == 30) {
   15730 				device_printf(sc->sc_dev, "%s timed out\n",
   15731 				    __func__);
   15732 				return -1;
   15733 			}
   15734 			delay(10 * 1000);
   15735 		}
   15736 		reg = CSR_READ(sc, WMREG_H2ME);
   15737 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15738 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15739 
   15740 		return 0;
   15741 	}
   15742 
   15743 	/* Acquire semaphore */
   15744 	rv = sc->phy.acquire(sc);
   15745 	if (rv != 0) {
   15746 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15747 		device_xname(sc->sc_dev), __func__));
   15748 		return -1;
   15749 	}
   15750 
   15751 	/* Toggle LANPHYPC */
   15752 	wm_toggle_lanphypc_pch_lpt(sc);
   15753 
   15754 	/* Unforce SMBus mode in PHY */
   15755 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15756 	if (rv != 0) {
   15757 		uint32_t reg2;
   15758 
   15759 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15760 			__func__);
   15761 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15762 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15763 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15764 		delay(50 * 1000);
   15765 
   15766 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15767 		    &phyreg);
   15768 		if (rv != 0)
   15769 			goto release;
   15770 	}
   15771 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15772 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15773 
   15774 	/* Unforce SMBus mode in MAC */
   15775 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15776 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15777 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15778 
   15779 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15780 	if (rv != 0)
   15781 		goto release;
   15782 	phyreg |= HV_PM_CTRL_K1_ENA;
   15783 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15784 
   15785 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15786 		&phyreg);
   15787 	if (rv != 0)
   15788 		goto release;
   15789 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15790 	    | I218_ULP_CONFIG1_STICKY_ULP
   15791 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15792 	    | I218_ULP_CONFIG1_WOL_HOST
   15793 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15794 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15795 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15796 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15797 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15798 	phyreg |= I218_ULP_CONFIG1_START;
   15799 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15800 
   15801 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15802 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15803 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15804 
   15805 release:
   15806 	/* Release semaphore */
   15807 	sc->phy.release(sc);
   15808 	wm_gmii_reset(sc);
   15809 	delay(50 * 1000);
   15810 
   15811 	return rv;
   15812 }
   15813 
   15814 /* WOL in the newer chipset interfaces (pchlan) */
   15815 static int
   15816 wm_enable_phy_wakeup(struct wm_softc *sc)
   15817 {
   15818 	device_t dev = sc->sc_dev;
   15819 	uint32_t mreg, moff;
   15820 	uint16_t wuce, wuc, wufc, preg;
   15821 	int i, rv;
   15822 
   15823 	KASSERT(sc->sc_type >= WM_T_PCH);
   15824 
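	/*
	 * While the MAC is powered down in Sx, the PHY itself has to
	 * recognize wake packets, so the MAC's receive address and
	 * multicast filter state are mirrored into the PHY's wakeup
	 * register space first.
	 */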
   15825 	/* Copy MAC RARs to PHY RARs */
   15826 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15827 
   15828 	/* Activate PHY wakeup */
   15829 	rv = sc->phy.acquire(sc);
   15830 	if (rv != 0) {
   15831 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15832 		    __func__);
   15833 		return rv;
   15834 	}
   15835 
   15836 	/*
   15837 	 * Enable access to PHY wakeup registers.
   15838 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15839 	 */
   15840 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15841 	if (rv != 0) {
   15842 		device_printf(dev,
   15843 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15844 		goto release;
   15845 	}
   15846 
   15847 	/* Copy MAC MTA to PHY MTA */
   15848 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15849 		uint16_t lo, hi;
   15850 
   15851 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15852 		lo = (uint16_t)(mreg & 0xffff);
   15853 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15854 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15855 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15856 	}
   15857 
   15858 	/* Configure PHY Rx Control register */
   15859 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15860 	mreg = CSR_READ(sc, WMREG_RCTL);
   15861 	if (mreg & RCTL_UPE)
   15862 		preg |= BM_RCTL_UPE;
   15863 	if (mreg & RCTL_MPE)
   15864 		preg |= BM_RCTL_MPE;
   15865 	preg &= ~(BM_RCTL_MO_MASK);
   15866 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15867 	if (moff != 0)
   15868 		preg |= moff << BM_RCTL_MO_SHIFT;
   15869 	if (mreg & RCTL_BAM)
   15870 		preg |= BM_RCTL_BAM;
   15871 	if (mreg & RCTL_PMCF)
   15872 		preg |= BM_RCTL_PMCF;
   15873 	mreg = CSR_READ(sc, WMREG_CTRL);
   15874 	if (mreg & CTRL_RFCE)
   15875 		preg |= BM_RCTL_RFCE;
   15876 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15877 
   15878 	wuc = WUC_APME | WUC_PME_EN;
   15879 	wufc = WUFC_MAG;
   15880 	/* Enable PHY wakeup in MAC register */
   15881 	CSR_WRITE(sc, WMREG_WUC,
   15882 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15883 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15884 
   15885 	/* Configure and enable PHY wakeup in PHY registers */
   15886 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15887 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15888 
   15889 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15890 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15891 
   15892 release:
   15893 	sc->phy.release(sc);
   15894 
   15895 	return 0;
   15896 }
   15897 
   15898 /* Power down workaround on D3 */
   15899 static void
   15900 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15901 {
   15902 	uint32_t reg;
   15903 	uint16_t phyreg;
   15904 	int i;
   15905 
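	/*
	 * Try to put the PHY's voltage regulator into shutdown.  If
	 * the first write doesn't stick, reset the PHY and try once
	 * more.
	 */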
   15906 	for (i = 0; i < 2; i++) {
   15907 		/* Disable link */
   15908 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15909 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15910 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15911 
   15912 		/*
   15913 		 * Call gig speed drop workaround on Gig disable before
   15914 		 * accessing any PHY registers
   15915 		 */
   15916 		if (sc->sc_type == WM_T_ICH8)
   15917 			wm_gig_downshift_workaround_ich8lan(sc);
   15918 
   15919 		/* Write VR power-down enable */
   15920 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15921 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15922 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15923 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15924 
   15925 		/* Read it back and test */
   15926 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15927 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15928 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15929 			break;
   15930 
   15931 		/* Issue PHY reset and repeat at most one more time */
   15932 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15933 	}
   15934 }
   15935 
   15936 /*
   15937  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15938  *  @sc: pointer to the HW structure
   15939  *
   15940  *  During S0 to Sx transition, it is possible the link remains at gig
   15941  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15942  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15943  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15944  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15945  *  needs to be written.
   15946  *  Parts that support (and are linked to a partner which support) EEE in
   15947  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15948  *  than 10Mbps w/o EEE.
   15949  */
   15950 static void
   15951 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15952 {
   15953 	device_t dev = sc->sc_dev;
   15954 	struct ethercom *ec = &sc->sc_ethercom;
   15955 	uint32_t phy_ctrl;
   15956 	int rv;
   15957 
   15958 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15959 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15960 
   15961 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15962 
   15963 	if (sc->sc_phytype == WMPHY_I217) {
   15964 		uint16_t devid = sc->sc_pcidevid;
   15965 
   15966 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15967 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15968 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15969 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15970 		    (sc->sc_type >= WM_T_PCH_SPT))
   15971 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15972 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15973 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15974 
   15975 		if (sc->phy.acquire(sc) != 0)
   15976 			goto out;
   15977 
   15978 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15979 			uint16_t eee_advert;
   15980 
   15981 			rv = wm_read_emi_reg_locked(dev,
   15982 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15983 			if (rv)
   15984 				goto release;
   15985 
   15986 			/*
   15987 			 * Disable LPLU if both link partners support 100BaseT
   15988 			 * EEE and 100Full is advertised on both ends of the
   15989 			 * link, and enable Auto Enable LPI since there will
   15990 			 * be no driver to enable LPI while in Sx.
   15991 			 */
   15992 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15993 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15994 				uint16_t anar, phy_reg;
   15995 
   15996 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15997 				    &anar);
   15998 				if (anar & ANAR_TX_FD) {
   15999 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16000 					    PHY_CTRL_NOND0A_LPLU);
   16001 
   16002 					/* Set Auto Enable LPI after link up */
   16003 					sc->phy.readreg_locked(dev, 2,
   16004 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16005 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16006 					sc->phy.writereg_locked(dev, 2,
   16007 					    I217_LPI_GPIO_CTRL, phy_reg);
   16008 				}
   16009 			}
   16010 		}
   16011 
   16012 		/*
   16013 		 * For i217 Intel Rapid Start Technology support,
   16014 		 * when the system is going into Sx and no manageability engine
   16015 		 * is present, the driver must configure proxy to reset only on
   16016 		 * power good.	LPI (Low Power Idle) state must also reset only
   16017 		 * on power good, as well as the MTA (Multicast table array).
   16018 		 * The SMBus release must also be disabled on LCD reset.
   16019 		 */
   16020 
   16021 		/*
   16022 		 * Enable MTA to reset for Intel Rapid Start Technology
   16023 		 * Support
   16024 		 */
   16025 
   16026 release:
   16027 		sc->phy.release(sc);
   16028 	}
   16029 out:
   16030 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16031 
   16032 	if (sc->sc_type == WM_T_ICH8)
   16033 		wm_gig_downshift_workaround_ich8lan(sc);
   16034 
   16035 	if (sc->sc_type >= WM_T_PCH) {
   16036 		wm_oem_bits_config_ich8lan(sc, false);
   16037 
   16038 		/* Reset PHY to activate OEM bits on 82577/8 */
   16039 		if (sc->sc_type == WM_T_PCH)
   16040 			wm_reset_phy(sc);
   16041 
   16042 		if (sc->phy.acquire(sc) != 0)
   16043 			return;
   16044 		wm_write_smbus_addr(sc);
   16045 		sc->phy.release(sc);
   16046 	}
   16047 }
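
/*
 * Illustrative sketch (not part of the driver): the LPLU-vs-EEE decision
 * made in wm_suspend_workarounds_ich8lan() above.  LPLU is dropped only
 * when both link partners advertise 100BaseT EEE and full-duplex is
 * advertised locally, because 100Mbps w/ EEE draws less power than
 * 10Mbps w/o EEE.  The EX_* bit values are hypothetical stand-ins for
 * the real AN_EEEADVERT_100_TX and ANAR_TX_FD definitions.
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

#define EX_EEEADVERT_100_TX	0x0002	/* hypothetical bit value */
#define EX_ANAR_TX_FD		0x0100	/* hypothetical bit value */

static bool
ex_drop_lplu_for_eee(uint16_t eee_adv, uint16_t eee_lp, uint16_t anar)
{

	return ((eee_adv & EX_EEEADVERT_100_TX) != 0) &&
	    ((eee_lp & EX_EEEADVERT_100_TX) != 0) &&
	    ((anar & EX_ANAR_TX_FD) != 0);
}
#endif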
   16048 
   16049 /*
   16050  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16051  *  @sc: pointer to the HW structure
   16052  *
   16053  *  During Sx to S0 transitions on non-managed devices or managed devices
   16054  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16056  *  the PHY.
   16057  *  On i217, setup Intel Rapid Start Technology.
   16058  */
   16059 static int
   16060 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16061 {
   16062 	device_t dev = sc->sc_dev;
   16063 	int rv;
   16064 
   16065 	if (sc->sc_type < WM_T_PCH2)
   16066 		return 0;
   16067 
   16068 	rv = wm_init_phy_workarounds_pchlan(sc);
   16069 	if (rv != 0)
   16070 		return -1;
   16071 
	/* For i217 Intel Rapid Start Technology support, when the system
	 * is transitioning from Sx and no manageability engine is present,
   16074 	 * configure SMBus to restore on reset, disable proxy, and enable
   16075 	 * the reset on MTA (Multicast table array).
   16076 	 */
   16077 	if (sc->sc_phytype == WMPHY_I217) {
   16078 		uint16_t phy_reg;
   16079 
   16080 		if (sc->phy.acquire(sc) != 0)
   16081 			return -1;
   16082 
   16083 		/* Clear Auto Enable LPI after link up */
   16084 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16085 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16086 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16087 
   16088 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16089 			/* Restore clear on SMB if no manageability engine
   16090 			 * is present
   16091 			 */
   16092 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16093 			    &phy_reg);
   16094 			if (rv != 0)
   16095 				goto release;
   16096 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16097 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16098 
   16099 			/* Disable Proxy */
   16100 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16101 		}
   16102 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16104 		if (rv != 0)
   16105 			goto release;
   16106 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16107 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16108 
   16109 release:
   16110 		sc->phy.release(sc);
   16111 		return rv;
   16112 	}
   16113 
   16114 	return 0;
   16115 }
   16116 
   16117 static void
   16118 wm_enable_wakeup(struct wm_softc *sc)
   16119 {
   16120 	uint32_t reg, pmreg;
   16121 	pcireg_t pmode;
   16122 	int rv = 0;
   16123 
   16124 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16125 		device_xname(sc->sc_dev), __func__));
   16126 
   16127 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16128 	    &pmreg, NULL) == 0)
   16129 		return;
   16130 
   16131 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16132 		goto pme;
   16133 
   16134 	/* Advertise the wakeup capability */
   16135 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16136 	    | CTRL_SWDPIN(3));
   16137 
   16138 	/* Keep the laser running on fiber adapters */
   16139 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16140 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16141 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16142 		reg |= CTRL_EXT_SWDPIN(3);
   16143 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16144 	}
   16145 
   16146 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16147 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16148 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16149 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16150 		wm_suspend_workarounds_ich8lan(sc);
   16151 
   16152 #if 0	/* For the multicast packet */
   16153 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_WUFC, reg);	/* otherwise reg is unused */
   16155 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16156 #endif
   16157 
   16158 	if (sc->sc_type >= WM_T_PCH) {
   16159 		rv = wm_enable_phy_wakeup(sc);
   16160 		if (rv != 0)
   16161 			goto pme;
   16162 	} else {
   16163 		/* Enable wakeup by the MAC */
   16164 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16165 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16166 	}
   16167 
   16168 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16169 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16170 		|| (sc->sc_type == WM_T_PCH2))
   16171 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16172 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16173 
   16174 pme:
   16175 	/* Request PME */
   16176 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16177 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16178 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16179 		/* For WOL */
   16180 		pmode |= PCI_PMCSR_PME_EN;
   16181 	} else {
   16182 		/* Disable WOL */
   16183 		pmode &= ~PCI_PMCSR_PME_EN;
   16184 	}
   16185 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16186 }
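
/*
 * Illustrative sketch (not part of the driver): why PCI_PMCSR_PME_STS is
 * OR'd into pmode above.  The PME status bit is write-1-to-clear (W1C),
 * so a read-modify-write of PMCSR only clears a stale PME event if the
 * bit is written back as 1.  The EX_* values are hypothetical stand-ins
 * for the real PMCSR bit definitions.
 */
#if 0	/* example only */
#include <stdint.h>

#define EX_PME_STS	0x8000u	/* W1C status bit (hypothetical value) */
#define EX_PME_EN	0x0100u	/* PME enable bit (hypothetical value) */

static uint16_t
ex_update_pmcsr(uint16_t pmcsr, int enable_wol)
{

	pmcsr |= EX_PME_STS;	/* writing 1 clears pending status */
	if (enable_wol)
		pmcsr |= EX_PME_EN;
	else
		pmcsr &= ~EX_PME_EN;
	return pmcsr;
}
#endif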
   16187 
   16188 /* Disable ASPM L0s and/or L1 for workaround */
   16189 static void
   16190 wm_disable_aspm(struct wm_softc *sc)
   16191 {
   16192 	pcireg_t reg, mask = 0;
	const char *str = "";
   16194 
   16195 	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
   16198 	 */
   16199 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16200 		return;
   16201 
   16202 	switch (sc->sc_type) {
   16203 	case WM_T_82571:
   16204 	case WM_T_82572:
   16205 		/*
   16206 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16207 		 * State Power management L1 State (ASPM L1).
   16208 		 */
   16209 		mask = PCIE_LCSR_ASPM_L1;
   16210 		str = "L1 is";
   16211 		break;
   16212 	case WM_T_82573:
   16213 	case WM_T_82574:
   16214 	case WM_T_82583:
   16215 		/*
   16216 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16217 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s with those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   16222 		 *
   16223 		 * References:
   16224 		 * Errata 8 of the Specification Update of i82573.
   16225 		 * Errata 20 of the Specification Update of i82574.
   16226 		 * Errata 9 of the Specification Update of i82583.
   16227 		 */
   16228 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16229 		str = "L0s and L1 are";
   16230 		break;
   16231 	default:
   16232 		return;
   16233 	}
   16234 
   16235 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16236 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16237 	reg &= ~mask;
   16238 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16239 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16240 
   16241 	/* Print only in wm_attach() */
   16242 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16243 		aprint_verbose_dev(sc->sc_dev,
   16244 		    "ASPM %s disabled to workaround the errata.\n", str);
   16245 }
   16246 
   16247 /* LPLU */
   16248 
   16249 static void
   16250 wm_lplu_d0_disable(struct wm_softc *sc)
   16251 {
   16252 	struct mii_data *mii = &sc->sc_mii;
   16253 	uint32_t reg;
   16254 	uint16_t phyval;
   16255 
   16256 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16257 		device_xname(sc->sc_dev), __func__));
   16258 
   16259 	if (sc->sc_phytype == WMPHY_IFE)
   16260 		return;
   16261 
   16262 	switch (sc->sc_type) {
   16263 	case WM_T_82571:
   16264 	case WM_T_82572:
   16265 	case WM_T_82573:
   16266 	case WM_T_82575:
   16267 	case WM_T_82576:
   16268 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16269 		phyval &= ~PMR_D0_LPLU;
   16270 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16271 		break;
   16272 	case WM_T_82580:
   16273 	case WM_T_I350:
   16274 	case WM_T_I210:
   16275 	case WM_T_I211:
   16276 		reg = CSR_READ(sc, WMREG_PHPM);
   16277 		reg &= ~PHPM_D0A_LPLU;
   16278 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16279 		break;
   16280 	case WM_T_82574:
   16281 	case WM_T_82583:
   16282 	case WM_T_ICH8:
   16283 	case WM_T_ICH9:
   16284 	case WM_T_ICH10:
   16285 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16286 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16287 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16288 		CSR_WRITE_FLUSH(sc);
   16289 		break;
   16290 	case WM_T_PCH:
   16291 	case WM_T_PCH2:
   16292 	case WM_T_PCH_LPT:
   16293 	case WM_T_PCH_SPT:
   16294 	case WM_T_PCH_CNP:
   16295 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16296 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16297 		if (wm_phy_resetisblocked(sc) == false)
   16298 			phyval |= HV_OEM_BITS_ANEGNOW;
   16299 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16300 		break;
   16301 	default:
   16302 		break;
   16303 	}
   16304 }
   16305 
   16306 /* EEE */
   16307 
   16308 static int
   16309 wm_set_eee_i350(struct wm_softc *sc)
   16310 {
   16311 	struct ethercom *ec = &sc->sc_ethercom;
   16312 	uint32_t ipcnfg, eeer;
   16313 	uint32_t ipcnfg_mask
   16314 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16315 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16316 
   16317 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16318 
   16319 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16320 	eeer = CSR_READ(sc, WMREG_EEER);
   16321 
   16322 	/* Enable or disable per user setting */
   16323 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16324 		ipcnfg |= ipcnfg_mask;
   16325 		eeer |= eeer_mask;
   16326 	} else {
   16327 		ipcnfg &= ~ipcnfg_mask;
   16328 		eeer &= ~eeer_mask;
   16329 	}
   16330 
   16331 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16332 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16333 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16334 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16335 
   16336 	return 0;
   16337 }
   16338 
   16339 static int
   16340 wm_set_eee_pchlan(struct wm_softc *sc)
   16341 {
   16342 	device_t dev = sc->sc_dev;
   16343 	struct ethercom *ec = &sc->sc_ethercom;
   16344 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16345 	int rv = 0;
   16346 
   16347 	switch (sc->sc_phytype) {
   16348 	case WMPHY_82579:
   16349 		lpa = I82579_EEE_LP_ABILITY;
   16350 		pcs_status = I82579_EEE_PCS_STATUS;
   16351 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16352 		break;
   16353 	case WMPHY_I217:
   16354 		lpa = I217_EEE_LP_ABILITY;
   16355 		pcs_status = I217_EEE_PCS_STATUS;
   16356 		adv_addr = I217_EEE_ADVERTISEMENT;
   16357 		break;
   16358 	default:
   16359 		return 0;
   16360 	}
   16361 
   16362 	if (sc->phy.acquire(sc)) {
   16363 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16364 		return 0;
   16365 	}
   16366 
   16367 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16368 	if (rv != 0)
   16369 		goto release;
   16370 
   16371 	/* Clear bits that enable EEE in various speeds */
   16372 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16373 
   16374 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16375 		/* Save off link partner's EEE ability */
   16376 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16377 		if (rv != 0)
   16378 			goto release;
   16379 
   16380 		/* Read EEE advertisement */
   16381 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16382 			goto release;
   16383 
   16384 		/*
   16385 		 * Enable EEE only for speeds in which the link partner is
   16386 		 * EEE capable and for which we advertise EEE.
   16387 		 */
   16388 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16389 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16390 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16391 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16392 			if ((data & ANLPAR_TX_FD) != 0)
   16393 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16394 			else {
   16395 				/*
				 * EEE is not supported in 100Half, so ignore
				 * the partner's 100Mb/s EEE ability if
				 * full-duplex is not advertised.
   16399 				 */
   16400 				sc->eee_lp_ability
   16401 				    &= ~AN_EEEADVERT_100_TX;
   16402 			}
   16403 		}
   16404 	}
   16405 
   16406 	if (sc->sc_phytype == WMPHY_82579) {
   16407 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16408 		if (rv != 0)
   16409 			goto release;
   16410 
   16411 		data &= ~I82579_LPI_PLL_SHUT_100;
   16412 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16413 	}
   16414 
   16415 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16416 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16417 		goto release;
   16418 
   16419 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16420 release:
   16421 	sc->phy.release(sc);
   16422 
   16423 	return rv;
   16424 }
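
/*
 * Illustrative sketch (not part of the driver): the EEE resolution rule
 * applied in wm_set_eee_pchlan() above.  A speed may enter LPI only when
 * both ends advertise EEE for it, and 100Mb/s additionally requires the
 * link partner to advertise full duplex.  The EX_* names and bit values
 * are hypothetical stand-ins.
 */
#if 0	/* example only */
#include <stdint.h>

#define EX_EEE_100TX	0x0002	/* hypothetical bit value */
#define EX_EEE_1000T	0x0004	/* hypothetical bit value */
#define EX_LPAR_TX_FD	0x0100	/* hypothetical bit value */
#define EX_LPI_EN_100	0x0001	/* hypothetical control bit */
#define EX_LPI_EN_1000	0x0002	/* hypothetical control bit */

static uint16_t
ex_resolve_lpi(uint16_t adv, uint16_t lp, uint16_t anlpar)
{
	uint16_t ctrl = 0;

	if (adv & lp & EX_EEE_1000T)
		ctrl |= EX_LPI_EN_1000;
	if ((adv & lp & EX_EEE_100TX) && (anlpar & EX_LPAR_TX_FD))
		ctrl |= EX_LPI_EN_100;
	return ctrl;
}
#endif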
   16425 
   16426 static int
   16427 wm_set_eee(struct wm_softc *sc)
   16428 {
   16429 	struct ethercom *ec = &sc->sc_ethercom;
   16430 
   16431 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16432 		return 0;
   16433 
   16434 	if (sc->sc_type == WM_T_I354) {
   16435 		/* I354 uses an external PHY */
   16436 		return 0; /* not yet */
   16437 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16438 		return wm_set_eee_i350(sc);
   16439 	else if (sc->sc_type >= WM_T_PCH2)
   16440 		return wm_set_eee_pchlan(sc);
   16441 
   16442 	return 0;
   16443 }
   16444 
   16445 /*
   16446  * Workarounds (mainly PHY related).
 * Basically, PHY workarounds belong in the PHY drivers.
   16448  */
   16449 
   16450 /* Workaround for 82566 Kumeran PCS lock loss */
   16451 static int
   16452 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16453 {
   16454 	struct mii_data *mii = &sc->sc_mii;
   16455 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16456 	int i, reg, rv;
   16457 	uint16_t phyreg;
   16458 
   16459 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16460 		device_xname(sc->sc_dev), __func__));
   16461 
   16462 	/* If the link is not up, do nothing */
   16463 	if ((status & STATUS_LU) == 0)
   16464 		return 0;
   16465 
	/* Nothing to do if the link speed is other than 1Gbps */
   16467 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16468 		return 0;
   16469 
   16470 	for (i = 0; i < 10; i++) {
   16471 		/* read twice */
   16472 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16473 		if (rv != 0)
   16474 			return rv;
   16475 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16476 		if (rv != 0)
   16477 			return rv;
   16478 
   16479 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16480 			goto out;	/* GOOD! */
   16481 
   16482 		/* Reset the PHY */
   16483 		wm_reset_phy(sc);
   16484 		delay(5*1000);
   16485 	}
   16486 
   16487 	/* Disable GigE link negotiation */
   16488 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16489 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16490 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16491 
   16492 	/*
   16493 	 * Call gig speed drop workaround on Gig disable before accessing
   16494 	 * any PHY registers.
   16495 	 */
   16496 	wm_gig_downshift_workaround_ich8lan(sc);
   16497 
   16498 out:
   16499 	return 0;
   16500 }
   16501 
   16502 /*
   16503  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16504  *  @sc: pointer to the HW structure
   16505  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16507  *  LPLU, Gig disable, MDIC PHY reset):
   16508  *    1) Set Kumeran Near-end loopback
   16509  *    2) Clear Kumeran Near-end loopback
   16510  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16511  */
   16512 static void
   16513 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16514 {
   16515 	uint16_t kmreg;
   16516 
   16517 	/* Only for igp3 */
   16518 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16519 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16520 			return;
   16521 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16522 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16523 			return;
   16524 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16525 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16526 	}
   16527 }
   16528 
   16529 /*
   16530  * Workaround for pch's PHYs
   16531  * XXX should be moved to new PHY driver?
   16532  */
   16533 static int
   16534 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16535 {
   16536 	device_t dev = sc->sc_dev;
   16537 	struct mii_data *mii = &sc->sc_mii;
   16538 	struct mii_softc *child;
   16539 	uint16_t phy_data, phyrev = 0;
   16540 	int phytype = sc->sc_phytype;
   16541 	int rv;
   16542 
   16543 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16544 		device_xname(dev), __func__));
   16545 	KASSERT(sc->sc_type == WM_T_PCH);
   16546 
   16547 	/* Set MDIO slow mode before any other MDIO access */
   16548 	if (phytype == WMPHY_82577)
   16549 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16550 			return rv;
   16551 
   16552 	child = LIST_FIRST(&mii->mii_phys);
   16553 	if (child != NULL)
   16554 		phyrev = child->mii_mpd_rev;
   16555 
	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   16557 	if ((child != NULL) &&
   16558 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16559 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16560 		/* Disable generation of early preamble (0x4431) */
   16561 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16562 		    &phy_data);
   16563 		if (rv != 0)
   16564 			return rv;
   16565 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16566 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16567 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16568 		    phy_data);
   16569 		if (rv != 0)
   16570 			return rv;
   16571 
   16572 		/* Preamble tuning for SSC */
   16573 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16574 		if (rv != 0)
   16575 			return rv;
   16576 	}
   16577 
   16578 	/* 82578 */
   16579 	if (phytype == WMPHY_82578) {
   16580 		/*
   16581 		 * Return registers to default by doing a soft reset then
   16582 		 * writing 0x3140 to the control register
   16583 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16584 		 */
   16585 		if ((child != NULL) && (phyrev < 2)) {
   16586 			PHY_RESET(child);
   16587 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16588 			if (rv != 0)
   16589 				return rv;
   16590 		}
   16591 	}
   16592 
   16593 	/* Select page 0 */
   16594 	if ((rv = sc->phy.acquire(sc)) != 0)
   16595 		return rv;
   16596 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16597 	sc->phy.release(sc);
   16598 	if (rv != 0)
   16599 		return rv;
   16600 
   16601 	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that it disables K1 when the link is at 1Gbps.
   16604 	 */
   16605 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16606 		return rv;
   16607 
   16608 	/* Workaround for link disconnects on a busy hub in half duplex */
   16609 	rv = sc->phy.acquire(sc);
   16610 	if (rv)
   16611 		return rv;
   16612 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16613 	if (rv)
   16614 		goto release;
   16615 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16616 	    phy_data & 0x00ff);
   16617 	if (rv)
   16618 		goto release;
   16619 
   16620 	/* Set MSE higher to enable link to stay up when noise is high */
   16621 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16622 release:
   16623 	sc->phy.release(sc);
   16624 
   16625 	return rv;
   16626 }
   16627 
   16628 /*
   16629  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16630  *  @sc:   pointer to the HW structure
   16631  */
   16632 static void
   16633 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16634 {
   16635 
   16636 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16637 		device_xname(sc->sc_dev), __func__));
   16638 
   16639 	if (sc->phy.acquire(sc) != 0)
   16640 		return;
   16641 
   16642 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16643 
   16644 	sc->phy.release(sc);
   16645 }
   16646 
   16647 static void
   16648 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16649 {
   16650 	device_t dev = sc->sc_dev;
   16651 	uint32_t mac_reg;
   16652 	uint16_t i, wuce;
   16653 	int count;
   16654 
   16655 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16656 		device_xname(dev), __func__));
   16657 
   16658 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16659 		return;
   16660 
   16661 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16662 	count = wm_rar_count(sc);
   16663 	for (i = 0; i < count; i++) {
   16664 		uint16_t lo, hi;
   16665 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16666 		lo = (uint16_t)(mac_reg & 0xffff);
   16667 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16668 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16669 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16670 
   16671 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16672 		lo = (uint16_t)(mac_reg & 0xffff);
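		/* Only the address-valid bit of RAH goes to BM_RAR_CTRL */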
   16673 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16674 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16675 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16676 	}
   16677 
   16678 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16679 }
   16680 
   16681 /*
   16682  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16683  *  with 82579 PHY
   16684  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16685  */
   16686 static int
   16687 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16688 {
   16689 	device_t dev = sc->sc_dev;
   16690 	int rar_count;
   16691 	int rv;
   16692 	uint32_t mac_reg;
   16693 	uint16_t dft_ctrl, data;
   16694 	uint16_t i;
   16695 
   16696 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16697 		device_xname(dev), __func__));
   16698 
   16699 	if (sc->sc_type < WM_T_PCH2)
   16700 		return 0;
   16701 
   16702 	/* Acquire PHY semaphore */
   16703 	rv = sc->phy.acquire(sc);
   16704 	if (rv != 0)
   16705 		return rv;
   16706 
   16707 	/* Disable Rx path while enabling/disabling workaround */
   16708 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16709 	if (rv != 0)
   16710 		goto out;
   16711 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16712 	    dft_ctrl | (1 << 14));
   16713 	if (rv != 0)
   16714 		goto out;
   16715 
   16716 	if (enable) {
   16717 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16718 		 * SHRAL/H) and initial CRC values to the MAC
   16719 		 */
   16720 		rar_count = wm_rar_count(sc);
   16721 		for (i = 0; i < rar_count; i++) {
   16722 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16723 			uint32_t addr_high, addr_low;
   16724 
   16725 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16726 			if (!(addr_high & RAL_AV))
   16727 				continue;
   16728 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16729 			mac_addr[0] = (addr_low & 0xFF);
   16730 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16731 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16732 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16733 			mac_addr[4] = (addr_high & 0xFF);
   16734 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16735 
   16736 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16737 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16738 		}
   16739 
   16740 		/* Write Rx addresses to the PHY */
   16741 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16742 	}
   16743 
   16744 	/*
   16745 	 * If enable ==
   16746 	 *	true: Enable jumbo frame workaround in the MAC.
   16747 	 *	false: Write MAC register values back to h/w defaults.
   16748 	 */
   16749 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16750 	if (enable) {
   16751 		mac_reg &= ~(1 << 14);
   16752 		mac_reg |= (7 << 15);
   16753 	} else
   16754 		mac_reg &= ~(0xf << 14);
   16755 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16756 
   16757 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16758 	if (enable) {
   16759 		mac_reg |= RCTL_SECRC;
   16760 		sc->sc_rctl |= RCTL_SECRC;
   16761 		sc->sc_flags |= WM_F_CRC_STRIP;
   16762 	} else {
   16763 		mac_reg &= ~RCTL_SECRC;
   16764 		sc->sc_rctl &= ~RCTL_SECRC;
   16765 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16766 	}
   16767 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16768 
   16769 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16770 	if (rv != 0)
   16771 		goto out;
   16772 	if (enable)
   16773 		data |= 1 << 0;
   16774 	else
   16775 		data &= ~(1 << 0);
   16776 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16777 	if (rv != 0)
   16778 		goto out;
   16779 
   16780 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16781 	if (rv != 0)
   16782 		goto out;
   16783 	/*
	 * XXX FreeBSD and Linux do the same thing: they set the same value
	 * in both the enable and the disable cases. Is that correct?
   16786 	 */
   16787 	data &= ~(0xf << 8);
   16788 	data |= (0xb << 8);
   16789 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16790 	if (rv != 0)
   16791 		goto out;
   16792 
   16793 	/*
   16794 	 * If enable ==
   16795 	 *	true: Enable jumbo frame workaround in the PHY.
   16796 	 *	false: Write PHY register values back to h/w defaults.
   16797 	 */
   16798 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16799 	if (rv != 0)
   16800 		goto out;
   16801 	data &= ~(0x7F << 5);
   16802 	if (enable)
   16803 		data |= (0x37 << 5);
   16804 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16805 	if (rv != 0)
   16806 		goto out;
   16807 
   16808 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16809 	if (rv != 0)
   16810 		goto out;
   16811 	if (enable)
   16812 		data &= ~(1 << 13);
   16813 	else
   16814 		data |= (1 << 13);
   16815 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16816 	if (rv != 0)
   16817 		goto out;
   16818 
   16819 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16820 	if (rv != 0)
   16821 		goto out;
   16822 	data &= ~(0x3FF << 2);
   16823 	if (enable)
   16824 		data |= (I82579_TX_PTR_GAP << 2);
   16825 	else
   16826 		data |= (0x8 << 2);
   16827 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16828 	if (rv != 0)
   16829 		goto out;
   16830 
   16831 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16832 	    enable ? 0xf100 : 0x7e00);
   16833 	if (rv != 0)
   16834 		goto out;
   16835 
   16836 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16837 	if (rv != 0)
   16838 		goto out;
   16839 	if (enable)
   16840 		data |= 1 << 10;
   16841 	else
   16842 		data &= ~(1 << 10);
   16843 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16844 	if (rv != 0)
   16845 		goto out;
   16846 
   16847 	/* Re-enable Rx path after enabling/disabling workaround */
   16848 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16849 	    dft_ctrl & ~(1 << 14));
   16850 
   16851 out:
   16852 	sc->phy.release(sc);
   16853 
   16854 	return rv;
   16855 }
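
/*
 * Illustrative sketch (not part of the driver): the initial CRC written
 * to WMREG_PCH_RAICC above is the complement of the little-endian
 * Ethernet CRC-32 over the 6-byte station address.  A bitwise version of
 * that CRC, assuming it matches what ether_crc32_le() computes (init
 * 0xffffffff, reflected polynomial 0xedb88320, no final xor):
 */
#if 0	/* example only */
#include <stddef.h>
#include <stdint.h>

static uint32_t
ex_crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffffU;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320U : 0);
	}
	return crc;
}
#endif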
   16856 
   16857 /*
   16858  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16859  *  done after every PHY reset.
   16860  */
   16861 static int
   16862 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16863 {
   16864 	device_t dev = sc->sc_dev;
   16865 	int rv;
   16866 
   16867 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16868 		device_xname(dev), __func__));
   16869 	KASSERT(sc->sc_type == WM_T_PCH2);
   16870 
   16871 	/* Set MDIO slow mode before any other MDIO access */
   16872 	rv = wm_set_mdio_slow_mode_hv(sc);
   16873 	if (rv != 0)
   16874 		return rv;
   16875 
   16876 	rv = sc->phy.acquire(sc);
   16877 	if (rv != 0)
   16878 		return rv;
   16879 	/* Set MSE higher to enable link to stay up when noise is high */
   16880 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16881 	if (rv != 0)
   16882 		goto release;
   16883 	/* Drop link after 5 times MSE threshold was reached */
   16884 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16885 release:
   16886 	sc->phy.release(sc);
   16887 
   16888 	return rv;
   16889 }
   16890 
   16891 /**
   16892  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16893  *  @link: link up bool flag
   16894  *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 *  indications, preventing further DMA write requests.  Work around the
 *  issue by disabling the de-assertion of the clock request when in
 *  1Gbps mode.
   16898  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16899  *  speeds in order to avoid Tx hangs.
   16900  **/
   16901 static int
   16902 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16903 {
   16904 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16905 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16906 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16907 	uint16_t phyreg;
   16908 
   16909 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc,
		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
   16913 		if (rv != 0)
   16914 			goto release;
   16915 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16916 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16917 		if (rv != 0)
   16918 			goto release;
   16919 		delay(20);
   16920 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16921 
   16922 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16923 		    &phyreg);
   16924 release:
   16925 		sc->phy.release(sc);
   16926 		return rv;
   16927 	}
   16928 
   16929 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16930 
   16931 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16932 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16933 	    || !link
   16934 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16935 		goto update_fextnvm6;
   16936 
   16937 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16938 
   16939 	/* Clear link status transmit timeout */
   16940 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16941 	if (speed == STATUS_SPEED_100) {
   16942 		/* Set inband Tx timeout to 5x10us for 100Half */
   16943 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16944 
   16945 		/* Do not extend the K1 entry latency for 100Half */
   16946 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16947 	} else {
   16948 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16949 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16950 
   16951 		/* Extend the K1 entry latency for 10 Mbps */
   16952 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16953 	}
   16954 
   16955 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16956 
   16957 update_fextnvm6:
   16958 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16959 	return 0;
   16960 }
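
/*
 * Illustrative sketch (not part of the driver): the inband timeout field
 * written above counts 10us units, so 5 gives the 50us used for 100Half
 * and 50 gives the 500us used for 10Mb/s link speeds.  The EX_* mask and
 * shift are hypothetical stand-ins for the I217_INBAND_CTRL definitions.
 */
#if 0	/* example only */
#include <stdint.h>

#define EX_TX_TIMEOUT_MASK	0x3f00	/* hypothetical field mask */
#define EX_TX_TIMEOUT_SHIFT	8	/* hypothetical field shift */

static uint16_t
ex_set_inband_timeout(uint16_t reg, unsigned int timeout_us)
{

	reg &= ~EX_TX_TIMEOUT_MASK;
	reg |= ((timeout_us / 10) << EX_TX_TIMEOUT_SHIFT)
	    & EX_TX_TIMEOUT_MASK;
	return reg;
}
#endif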
   16961 
   16962 /*
   16963  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16964  *  @sc:   pointer to the HW structure
   16965  *  @link: link up bool flag
   16966  *
   16967  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16969  *  If link is down, the function will restore the default K1 setting located
   16970  *  in the NVM.
   16971  */
   16972 static int
   16973 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16974 {
   16975 	int k1_enable = sc->sc_nvm_k1_enabled;
   16976 
   16977 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16978 		device_xname(sc->sc_dev), __func__));
   16979 
   16980 	if (sc->phy.acquire(sc) != 0)
   16981 		return -1;
   16982 
   16983 	if (link) {
   16984 		k1_enable = 0;
   16985 
   16986 		/* Link stall fix for link up */
   16987 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16988 		    0x0100);
   16989 	} else {
   16990 		/* Link stall fix for link down */
   16991 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16992 		    0x4100);
   16993 	}
   16994 
   16995 	wm_configure_k1_ich8lan(sc, k1_enable);
   16996 	sc->phy.release(sc);
   16997 
   16998 	return 0;
   16999 }
   17000 
   17001 /*
   17002  *  wm_k1_workaround_lv - K1 Si workaround
   17003  *  @sc:   pointer to the HW structure
   17004  *
   17005  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17006  *  Disable K1 for 1000 and 100 speeds
   17007  */
   17008 static int
   17009 wm_k1_workaround_lv(struct wm_softc *sc)
   17010 {
   17011 	uint32_t reg;
   17012 	uint16_t phyreg;
   17013 	int rv;
   17014 
   17015 	if (sc->sc_type != WM_T_PCH2)
   17016 		return 0;
   17017 
   17018 	/* Set K1 beacon duration based on 10Mbps speed */
   17019 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17020 	if (rv != 0)
   17021 		return rv;
   17022 
   17023 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17024 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17025 		if (phyreg &
   17026 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   17028 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17029 			    &phyreg);
   17030 			if (rv != 0)
   17031 				return rv;
   17032 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17033 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17034 			    phyreg);
   17035 			if (rv != 0)
   17036 				return rv;
   17037 		} else {
   17038 			/* For 10Mbps */
   17039 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17040 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17041 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17042 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17043 		}
   17044 	}
   17045 
   17046 	return 0;
   17047 }
   17048 
   17049 /*
   17050  *  wm_link_stall_workaround_hv - Si workaround
   17051  *  @sc: pointer to the HW structure
   17052  *
   17053  *  This function works around a Si bug where the link partner can get
   17054  *  a link up indication before the PHY does. If small packets are sent
   17055  *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   17057  *  further packets from being received.  The workaround is to clear the
   17058  *  packet buffer after the PHY detects link up.
   17059  */
   17060 static int
   17061 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17062 {
   17063 	uint16_t phyreg;
   17064 
   17065 	if (sc->sc_phytype != WMPHY_82578)
   17066 		return 0;
   17067 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17069 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17070 	if ((phyreg & BMCR_LOOP) != 0)
   17071 		return 0;
   17072 
   17073 	/* Check if link is up and at 1Gbps */
   17074 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17075 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17076 	    | BM_CS_STATUS_SPEED_MASK;
   17077 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17078 		| BM_CS_STATUS_SPEED_1000))
   17079 		return 0;
   17080 
   17081 	delay(200 * 1000);	/* XXX too big */
   17082 
   17083 	/* Flush the packets in the fifo buffer */
   17084 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17085 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17086 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17087 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17088 
   17089 	return 0;
   17090 }
   17091 
   17092 static int
   17093 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17094 {
   17095 	int rv;
   17096 	uint16_t reg;
   17097 
   17098 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17099 	if (rv != 0)
   17100 		return rv;
   17101 
   17102 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17103 	    reg | HV_KMRN_MDIO_SLOW);
   17104 }
   17105 
   17106 /*
   17107  *  wm_configure_k1_ich8lan - Configure K1 power state
   17108  *  @sc: pointer to the HW structure
   17109  *  @enable: K1 state to configure
   17110  *
   17111  *  Configure the K1 power state based on the provided parameter.
   17112  *  Assumes semaphore already acquired.
   17113  */
   17114 static void
   17115 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17116 {
   17117 	uint32_t ctrl, ctrl_ext, tmp;
   17118 	uint16_t kmreg;
   17119 	int rv;
   17120 
   17121 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17122 
   17123 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17124 	if (rv != 0)
   17125 		return;
   17126 
   17127 	if (k1_enable)
   17128 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17129 	else
   17130 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17131 
   17132 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17133 	if (rv != 0)
   17134 		return;
   17135 
   17136 	delay(20);
   17137 
   17138 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17139 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17140 
   17141 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17142 	tmp |= CTRL_FRCSPD;
   17143 
   17144 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17145 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17146 	CSR_WRITE_FLUSH(sc);
   17147 	delay(20);
   17148 
   17149 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17150 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17151 	CSR_WRITE_FLUSH(sc);
   17152 	delay(20);
   17153 
   17154 	return;
   17155 }
   17156 
   17157 /* special case - for 82575 - need to do manual init ... */
   17158 static void
   17159 wm_reset_init_script_82575(struct wm_softc *sc)
   17160 {
   17161 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17164 	 */
   17165 
   17166 	/* SerDes configuration via SERDESCTRL */
   17167 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17168 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17169 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17170 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17171 
   17172 	/* CCM configuration via CCMCTL register */
   17173 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17174 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17175 
   17176 	/* PCIe lanes configuration */
   17177 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17178 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17179 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17180 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17181 
   17182 	/* PCIe PLL Configuration */
   17183 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17184 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17185 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17186 }
   17187 
   17188 static void
   17189 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17190 {
   17191 	uint32_t reg;
   17192 	uint16_t nvmword;
   17193 	int rv;
   17194 
   17195 	if (sc->sc_type != WM_T_82580)
   17196 		return;
   17197 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17198 		return;
   17199 
   17200 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17201 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17202 	if (rv != 0) {
   17203 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17204 		    __func__);
   17205 		return;
   17206 	}
   17207 
   17208 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17209 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17210 		reg |= MDICNFG_DEST;
   17211 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17212 		reg |= MDICNFG_COM_MDIO;
   17213 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17214 }
   17215 
   17216 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17217 
   17218 static bool
   17219 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17220 {
   17221 	uint32_t reg;
   17222 	uint16_t id1, id2;
   17223 	int i, rv;
   17224 
   17225 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17226 		device_xname(sc->sc_dev), __func__));
   17227 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17228 
   17229 	id1 = id2 = 0xffff;
   17230 	for (i = 0; i < 2; i++) {
   17231 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17232 		    &id1);
   17233 		if ((rv != 0) || MII_INVALIDID(id1))
   17234 			continue;
   17235 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17236 		    &id2);
   17237 		if ((rv != 0) || MII_INVALIDID(id2))
   17238 			continue;
   17239 		break;
   17240 	}
   17241 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17242 		goto out;
   17243 
   17244 	/*
   17245 	 * In case the PHY needs to be in mdio slow mode,
   17246 	 * set slow mode and try to get the PHY id again.
   17247 	 */
   17248 	rv = 0;
   17249 	if (sc->sc_type < WM_T_PCH_LPT) {
   17250 		sc->phy.release(sc);
   17251 		wm_set_mdio_slow_mode_hv(sc);
   17252 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   17253 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   17254 		sc->phy.acquire(sc);
   17255 	}
   17256 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17257 		device_printf(sc->sc_dev, "XXX return with false\n");
   17258 		return false;
   17259 	}
   17260 out:
   17261 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17262 		/* Only unforce SMBus if ME is not active */
   17263 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17264 			uint16_t phyreg;
   17265 
   17266 			/* Unforce SMBus mode in PHY */
   17267 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17268 			    CV_SMB_CTRL, &phyreg);
   17269 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17270 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17271 			    CV_SMB_CTRL, phyreg);
   17272 
   17273 			/* Unforce SMBus mode in MAC */
   17274 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17275 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17276 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17277 		}
   17278 	}
   17279 	return true;
   17280 }
   17281 
   17282 static void
   17283 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17284 {
   17285 	uint32_t reg;
   17286 	int i;
   17287 
   17288 	/* Set PHY Config Counter to 50msec */
   17289 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17290 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17291 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17292 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17293 
   17294 	/* Toggle LANPHYPC */
   17295 	reg = CSR_READ(sc, WMREG_CTRL);
   17296 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17297 	reg &= ~CTRL_LANPHYPC_VALUE;
   17298 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17299 	CSR_WRITE_FLUSH(sc);
   17300 	delay(1000);
   17301 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17302 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17303 	CSR_WRITE_FLUSH(sc);
   17304 
   17305 	if (sc->sc_type < WM_T_PCH_LPT)
   17306 		delay(50 * 1000);
   17307 	else {
   17308 		i = 20;
   17309 
   17310 		do {
   17311 			delay(5 * 1000);
   17312 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17313 		    && i--);
   17314 
   17315 		delay(30 * 1000);
   17316 	}
   17317 }
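
/*
 * Illustrative sketch (not part of the driver): the bounded poll used
 * above, which waits up to 20 * 5ms for the LANPHYPC completion bit and
 * then settles for 30ms.  ex_done() is a hypothetical predicate standing
 * in for the CTRL_EXT_LPCD check.
 */
#if 0	/* example only */
static int
ex_poll_bounded(int (*done)(void *), void *arg, int tries,
    unsigned int step_us)
{

	while (tries-- > 0) {
		delay(step_us);
		if ((*done)(arg))
			return 0;
	}
	return -1;	/* timed out */
}
#endif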
   17318 
   17319 static int
   17320 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17321 {
   17322 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17323 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17324 	uint32_t rxa;
   17325 	uint16_t scale = 0, lat_enc = 0;
   17326 	int32_t obff_hwm = 0;
   17327 	int64_t lat_ns, value;
   17328 
   17329 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17330 		device_xname(sc->sc_dev), __func__));
   17331 
   17332 	if (link) {
   17333 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17334 		uint32_t status;
   17335 		uint16_t speed;
   17336 		pcireg_t preg;
   17337 
   17338 		status = CSR_READ(sc, WMREG_STATUS);
   17339 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17340 		case STATUS_SPEED_10:
   17341 			speed = 10;
   17342 			break;
   17343 		case STATUS_SPEED_100:
   17344 			speed = 100;
   17345 			break;
   17346 		case STATUS_SPEED_1000:
   17347 			speed = 1000;
   17348 			break;
   17349 		default:
   17350 			device_printf(sc->sc_dev, "Unknown speed "
   17351 			    "(status = %08x)\n", status);
   17352 			return -1;
   17353 		}
   17354 
   17355 		/* Rx Packet Buffer Allocation size (KB) */
   17356 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17357 
   17358 		/*
   17359 		 * Determine the maximum latency tolerated by the device.
   17360 		 *
   17361 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17362 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17363 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17364 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17365 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17366 		 */
   17367 		lat_ns = ((int64_t)rxa * 1024 -
   17368 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17369 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17370 		if (lat_ns < 0)
   17371 			lat_ns = 0;
   17372 		else
   17373 			lat_ns /= speed;
   17374 		value = lat_ns;
   17375 
   17376 		while (value > LTRV_VALUE) {
			scale++;
   17378 			value = howmany(value, __BIT(5));
   17379 		}
   17380 		if (scale > LTRV_SCALE_MAX) {
   17381 			device_printf(sc->sc_dev,
   17382 			    "Invalid LTR latency scale %d\n", scale);
   17383 			return -1;
   17384 		}
   17385 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17386 
   17387 		/* Determine the maximum latency tolerated by the platform */
   17388 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17389 		    WM_PCI_LTR_CAP_LPT);
   17390 		max_snoop = preg & 0xffff;
   17391 		max_nosnoop = preg >> 16;
   17392 
   17393 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17394 
   17395 		if (lat_enc > max_ltr_enc) {
   17396 			lat_enc = max_ltr_enc;
   17397 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17398 			    * PCI_LTR_SCALETONS(
   17399 				    __SHIFTOUT(lat_enc,
   17400 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17401 		}
   17402 
   17403 		if (lat_ns) {
   17404 			lat_ns *= speed * 1000;
   17405 			lat_ns /= 8;
   17406 			lat_ns /= 1000000000;
   17407 			obff_hwm = (int32_t)(rxa - lat_ns);
   17408 		}
   17409 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17410 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   17411 			    "(rxa = %d, lat_ns = %d)\n",
   17412 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17413 			return -1;
   17414 		}
   17415 	}
   17416 	/* Snoop and No-Snoop latencies the same */
   17417 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17418 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17419 
   17420 	/* Set OBFF high water mark */
   17421 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17422 	reg |= obff_hwm;
   17423 	CSR_WRITE(sc, WMREG_SVT, reg);
   17424 
   17425 	/* Enable OBFF */
   17426 	reg = CSR_READ(sc, WMREG_SVCR);
   17427 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17428 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17429 
   17430 	return 0;
   17431 }
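
/*
 * Illustrative sketch (not part of the driver): the LTR encoding used
 * above.  A latency in ns is expressed as a 10-bit value scaled by
 * 2^(5 * scale), e.g. 1,000,000 ns encodes as scale=2 (2^10 ns units)
 * and value=977, since 977 * 1024 >= 10^6.  The field layout below
 * (value in bits 9:0, scale in bits 12:10) is an assumption for the
 * sake of the example.
 */
#if 0	/* example only */
#include <stdint.h>

static uint16_t
ex_ltr_encode(int64_t lat_ns)
{
	uint16_t scale = 0;

	/* Round up by 2^5 per scale step until the value fits 10 bits. */
	while (lat_ns > 0x3ff && scale < 5) {
		lat_ns = (lat_ns + 31) / 32;
		scale++;
	}
	/*
	 * Latencies that still do not fit at scale 5 are not encodable;
	 * the driver above treats that case as an error.
	 */
	return (uint16_t)((scale << 10) | ((uint16_t)lat_ns & 0x3ff));
}
#endif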
   17432 
   17433 /*
   17434  * I210 Errata 25 and I211 Errata 10
   17435  * Slow System Clock.
   17436  *
 * Note that this function is called for both FLASH and iNVM on NetBSD.
   17438  */
   17439 static int
   17440 wm_pll_workaround_i210(struct wm_softc *sc)
   17441 {
   17442 	uint32_t mdicnfg, wuc;
   17443 	uint32_t reg;
   17444 	pcireg_t pcireg;
   17445 	uint32_t pmreg;
   17446 	uint16_t nvmword, tmp_nvmword;
   17447 	uint16_t phyval;
   17448 	bool wa_done = false;
   17449 	int i, rv = 0;
   17450 
   17451 	/* Get Power Management cap offset */
   17452 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17453 	    &pmreg, NULL) == 0)
   17454 		return -1;
   17455 
   17456 	/* Save WUC and MDICNFG registers */
   17457 	wuc = CSR_READ(sc, WMREG_WUC);
   17458 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17459 
   17460 	reg = mdicnfg & ~MDICNFG_DEST;
   17461 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17462 
   17463 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17464 		/*
   17465 		 * The default value of the Initialization Control Word 1
   17466 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17467 		 */
   17468 		nvmword = INVM_DEFAULT_AL;
   17469 	}
   17470 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17471 
   17472 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17473 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17474 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17475 
   17476 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17477 			rv = 0;
   17478 			break; /* OK */
   17479 		} else
   17480 			rv = -1;
   17481 
   17482 		wa_done = true;
   17483 		/* Directly reset the internal PHY */
   17484 		reg = CSR_READ(sc, WMREG_CTRL);
   17485 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17486 
   17487 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17488 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17489 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17490 
   17491 		CSR_WRITE(sc, WMREG_WUC, 0);
   17492 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17493 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17494 
   17495 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17496 		    pmreg + PCI_PMCSR);
   17497 		pcireg |= PCI_PMCSR_STATE_D3;
   17498 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17499 		    pmreg + PCI_PMCSR, pcireg);
   17500 		delay(1000);
   17501 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17502 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17503 		    pmreg + PCI_PMCSR, pcireg);
   17504 
   17505 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17506 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17507 
   17508 		/* Restore WUC register */
   17509 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17510 	}
   17511 
   17512 	/* Restore MDICNFG setting */
   17513 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17514 	if (wa_done)
   17515 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17516 	return rv;
   17517 }
   17518 
   17519 static void
   17520 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17521 {
   17522 	uint32_t reg;
   17523 
   17524 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17525 		device_xname(sc->sc_dev), __func__));
   17526 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17527 	    || (sc->sc_type == WM_T_PCH_CNP));
   17528 
   17529 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17530 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17531 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17532 
   17533 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17534 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17535 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17536 }
   17537 
   17538 /* Sysctl functions */
   17539 static int
   17540 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17541 {
   17542 	struct sysctlnode node = *rnode;
   17543 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17544 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17545 	struct wm_softc *sc = txq->txq_sc;
   17546 	uint32_t reg;
   17547 
   17548 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17549 	node.sysctl_data = &reg;
   17550 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17551 }
   17552 
   17553 static int
   17554 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17555 {
   17556 	struct sysctlnode node = *rnode;
   17557 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17558 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17559 	struct wm_softc *sc = txq->txq_sc;
   17560 	uint32_t reg;
   17561 
   17562 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17563 	node.sysctl_data = &reg;
   17564 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17565 }
   17566 
   17567 #ifdef WM_DEBUG
   17568 static int
   17569 wm_sysctl_debug(SYSCTLFN_ARGS)
   17570 {
   17571 	struct sysctlnode node = *rnode;
   17572 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17573 	uint32_t dflags;
   17574 	int error;
   17575 
   17576 	dflags = sc->sc_debug;
   17577 	node.sysctl_data = &dflags;
   17578 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17579 
   17580 	if (error || newp == NULL)
   17581 		return error;
   17582 
   17583 	sc->sc_debug = dflags;
   17584 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17585 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17586 
   17587 	return 0;
   17588 }
   17589 #endif
   17590