/*	$NetBSD: if_wm.c,v 1.693 2020/10/30 06:23:39 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.693 2020/10/30 06:23:39 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
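
/*
 * Illustrative usage sketch (not compiled): DPRINTF() takes the printf
 * arguments as a single parenthesized list, so a call site looks like
 * the following.  The format string here is a hypothetical example.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif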

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
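
/*
 * Illustrative sketch (not compiled): because WM_NTXDESC(txq) is a
 * power of two, the wrap macros above reduce a modulo to a mask.
 * The "txq" used here is a hypothetical queue pointer.
 */
#if 0
	/* Equivalent to (txq->txq_next + 1) % WM_NTXDESC(txq). */
	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
#endif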

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
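
/*
 * Worked numbers for the sizing comment above, assuming a roughly 9k
 * jumbo frame: such a frame spans about 9018 / 2048 -> 5 MCLBYTES
 * buffers, and 256 descriptors / 5 buffers per packet leaves room for
 * about 50 full-sized jumbo packets in the ring.
 */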

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
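
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * pastes tokens into roughly
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the "##" inside the string literal is not pasted; the literal just
 * reserves a buffer large enough for the "%s%02d%s" name), and the
 * attach macro for queue 0 then formats "txq00txdw" into that buffer
 * before calling evcnt_attach_dynamic().
 */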

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t 	txq_last_hw_cmd;
	uint8_t 	txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
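
/*
 * Illustrative sketch (not compiled): the op vectors above let the
 * attach code plug in chip-specific acquire/release/read routines
 * while callers stay generic.  A locked NVM read would look roughly
 * like this ("word" and "data" are hypothetical):
 */
#if 0
	if (sc->nvm.acquire(sc) == 0) {
		rv = sc->nvm.read(sc, word, 1, &data);
		sc->nvm.release(sc);
	}
#endif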

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
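
/*
 * Illustrative sketch (not compiled): the tail pointer kept by the
 * macros above appends each mbuf in O(1), with no walk of the chain.
 * "m0" and "m1" are hypothetical mbufs.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);	   /* rxq_tailp = &rxq_head, chain empty */
	WM_RXCHAIN_LINK(rxq, m0);  /* rxq_head = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(rxq, m1);  /* m0->m_next = m1, tailp = &m1->m_next */
#endif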

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
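
/*
 * Illustrative sketch (not compiled): the _LO/_HI pairs above split a
 * ring's bus address for the chip's 32-bit base-address register
 * pairs; with a 32-bit bus_addr_t the high word is constant zero.
 * "tdbal_reg" and "tdbah_reg" are placeholders for the real register
 * offsets, not names from this driver.
 */
#if 0
	CSR_WRITE(sc, tdbal_reg, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, tdbah_reg, WM_CDTXADDR_HI(txq, 0));
#endif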

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1335 	  "82801I (AMT) LAN Controller",
   1336 	  WM_T_ICH9,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1338 	  "82801I 10/100 LAN Controller",
   1339 	  WM_T_ICH9,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1341 	  "82801I (G) 10/100 LAN Controller",
   1342 	  WM_T_ICH9,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1344 	  "82801I (GT) 10/100 LAN Controller",
   1345 	  WM_T_ICH9,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1347 	  "82801I (C) LAN Controller",
   1348 	  WM_T_ICH9,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1350 	  "82801I mobile LAN Controller",
   1351 	  WM_T_ICH9,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1353 	  "82801I mobile (V) LAN Controller",
   1354 	  WM_T_ICH9,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1356 	  "82801I mobile (AMT) LAN Controller",
   1357 	  WM_T_ICH9,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1359 	  "82567LM-4 LAN Controller",
   1360 	  WM_T_ICH9,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1362 	  "82567LM-2 LAN Controller",
   1363 	  WM_T_ICH10,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1365 	  "82567LF-2 LAN Controller",
   1366 	  WM_T_ICH10,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1368 	  "82567LM-3 LAN Controller",
   1369 	  WM_T_ICH10,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1371 	  "82567LF-3 LAN Controller",
   1372 	  WM_T_ICH10,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1374 	  "82567V-2 LAN Controller",
   1375 	  WM_T_ICH10,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1377 	  "82567V-3? LAN Controller",
   1378 	  WM_T_ICH10,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1380 	  "HANKSVILLE LAN Controller",
   1381 	  WM_T_ICH10,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1383 	  "PCH LAN (82577LM) Controller",
   1384 	  WM_T_PCH,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1386 	  "PCH LAN (82577LC) Controller",
   1387 	  WM_T_PCH,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1389 	  "PCH LAN (82578DM) Controller",
   1390 	  WM_T_PCH,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1392 	  "PCH LAN (82578DC) Controller",
   1393 	  WM_T_PCH,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1395 	  "PCH2 LAN (82579LM) Controller",
   1396 	  WM_T_PCH2,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1398 	  "PCH2 LAN (82579V) Controller",
   1399 	  WM_T_PCH2,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1401 	  "82575EB dual-1000baseT Ethernet",
   1402 	  WM_T_82575,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1404 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1405 	  WM_T_82575,		WMP_F_SERDES },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1407 	  "82575GB quad-1000baseT Ethernet",
   1408 	  WM_T_82575,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1410 	  "82575GB quad-1000baseT Ethernet (PM)",
   1411 	  WM_T_82575,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1413 	  "82576 1000BaseT Ethernet",
   1414 	  WM_T_82576,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1416 	  "82576 1000BaseX Ethernet",
   1417 	  WM_T_82576,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1420 	  "82576 gigabit Ethernet (SERDES)",
   1421 	  WM_T_82576,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1424 	  "82576 quad-1000BaseT Ethernet",
   1425 	  WM_T_82576,		WMP_F_COPPER },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1428 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1429 	  WM_T_82576,		WMP_F_COPPER },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1432 	  "82576 gigabit Ethernet",
   1433 	  WM_T_82576,		WMP_F_COPPER },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1436 	  "82576 gigabit Ethernet (SERDES)",
   1437 	  WM_T_82576,		WMP_F_SERDES },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1439 	  "82576 quad-gigabit Ethernet (SERDES)",
   1440 	  WM_T_82576,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1443 	  "82580 1000BaseT Ethernet",
   1444 	  WM_T_82580,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1446 	  "82580 1000BaseX Ethernet",
   1447 	  WM_T_82580,		WMP_F_FIBER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1450 	  "82580 1000BaseT Ethernet (SERDES)",
   1451 	  WM_T_82580,		WMP_F_SERDES },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1454 	  "82580 gigabit Ethernet (SGMII)",
   1455 	  WM_T_82580,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1457 	  "82580 dual-1000BaseT Ethernet",
   1458 	  WM_T_82580,		WMP_F_COPPER },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1461 	  "82580 quad-1000BaseX Ethernet",
   1462 	  WM_T_82580,		WMP_F_FIBER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1465 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1466 	  WM_T_82580,		WMP_F_COPPER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1469 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1470 	  WM_T_82580,		WMP_F_SERDES },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1473 	  "DH89XXCC 1000BASE-KX Ethernet",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1477 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1478 	  WM_T_82580,		WMP_F_SERDES },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1481 	  "I350 Gigabit Network Connection",
   1482 	  WM_T_I350,		WMP_F_COPPER },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1485 	  "I350 Gigabit Fiber Network Connection",
   1486 	  WM_T_I350,		WMP_F_FIBER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1489 	  "I350 Gigabit Backplane Connection",
   1490 	  WM_T_I350,		WMP_F_SERDES },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1493 	  "I350 Quad Port Gigabit Ethernet",
   1494 	  WM_T_I350,		WMP_F_SERDES },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1497 	  "I350 Gigabit Connection",
   1498 	  WM_T_I350,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1501 	  "I354 Gigabit Ethernet (KX)",
   1502 	  WM_T_I354,		WMP_F_SERDES },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1505 	  "I354 Gigabit Ethernet (SGMII)",
   1506 	  WM_T_I354,		WMP_F_COPPER },
   1507 
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1509 	  "I354 Gigabit Ethernet (2.5G)",
   1510 	  WM_T_I354,		WMP_F_COPPER },
   1511 
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1513 	  "I210-T1 Ethernet Server Adapter",
   1514 	  WM_T_I210,		WMP_F_COPPER },
   1515 
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1517 	  "I210 Ethernet (Copper OEM)",
   1518 	  WM_T_I210,		WMP_F_COPPER },
   1519 
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1521 	  "I210 Ethernet (Copper IT)",
   1522 	  WM_T_I210,		WMP_F_COPPER },
   1523 
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1525 	  "I210 Ethernet (Copper, FLASH less)",
   1526 	  WM_T_I210,		WMP_F_COPPER },
   1527 
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1529 	  "I210 Gigabit Ethernet (Fiber)",
   1530 	  WM_T_I210,		WMP_F_FIBER },
   1531 
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1533 	  "I210 Gigabit Ethernet (SERDES)",
   1534 	  WM_T_I210,		WMP_F_SERDES },
   1535 
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1537 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1538 	  WM_T_I210,		WMP_F_SERDES },
   1539 
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1541 	  "I210 Gigabit Ethernet (SGMII)",
   1542 	  WM_T_I210,		WMP_F_COPPER },
   1543 
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1545 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1546 	  WM_T_I210,		WMP_F_COPPER },
   1547 
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1549 	  "I211 Ethernet (COPPER)",
   1550 	  WM_T_I211,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1552 	  "I217 V Ethernet Connection",
   1553 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1555 	  "I217 LM Ethernet Connection",
   1556 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1558 	  "I218 V Ethernet Connection",
   1559 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1561 	  "I218 V Ethernet Connection",
   1562 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1564 	  "I218 V Ethernet Connection",
   1565 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1567 	  "I218 LM Ethernet Connection",
   1568 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1570 	  "I218 LM Ethernet Connection",
   1571 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1573 	  "I218 LM Ethernet Connection",
   1574 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1591 	  "I219 LM Ethernet Connection",
   1592 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1594 	  "I219 LM Ethernet Connection",
   1595 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1597 	  "I219 LM Ethernet Connection",
   1598 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1600 	  "I219 LM Ethernet Connection",
   1601 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1603 	  "I219 LM Ethernet Connection",
   1604 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1606 	  "I219 LM Ethernet Connection",
   1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1609 	  "I219 LM Ethernet Connection",
   1610 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1612 	  "I219 LM Ethernet Connection",
   1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1615 	  "I219 LM Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1618 	  "I219 LM Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1630 	  "I219 V Ethernet Connection",
   1631 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1633 	  "I219 V Ethernet Connection",
   1634 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1636 	  "I219 V Ethernet Connection",
   1637 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1639 	  "I219 V Ethernet Connection",
   1640 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1642 	  "I219 V Ethernet Connection",
   1643 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1645 	  "I219 V Ethernet Connection",
   1646 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1648 	  "I219 V Ethernet Connection",
   1649 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1651 	  "I219 V Ethernet Connection",
   1652 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1654 	  "I219 V Ethernet Connection",
   1655 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1657 	  "I219 V Ethernet Connection",
   1658 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1659 	{ 0,			0,
   1660 	  NULL,
   1661 	  0,			0 },
   1662 };
   1663 
   1664 /*
   1665  * Register read/write functions.
   1666  * Other than CSR_{READ|WRITE}().
   1667  */
   1668 
   1669 #if 0 /* Not currently used */
   1670 static inline uint32_t
   1671 wm_io_read(struct wm_softc *sc, int reg)
   1672 {
   1673 
   1674 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1675 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1676 }
   1677 #endif
   1678 
   1679 static inline void
   1680 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1681 {
   1682 
   1683 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1684 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1685 }
   1686 
   1687 static inline void
   1688 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1689     uint32_t data)
   1690 {
   1691 	uint32_t regval;
   1692 	int i;
   1693 
   1694 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1695 
   1696 	CSR_WRITE(sc, reg, regval);
   1697 
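         	/*
         	 * Poll for the controller to latch the write; each iteration
         	 * waits 5 microseconds, up to SCTL_CTL_POLL_TIMEOUT tries.
         	 */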
   1698 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1699 		delay(5);
   1700 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1701 			break;
   1702 	}
   1703 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1704 		aprint_error("%s: WARNING:"
   1705 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1706 		    device_xname(sc->sc_dev), reg);
   1707 	}
   1708 }
   1709 
   1710 static inline void
   1711 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1712 {
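         	/*
         	 * Split the bus address into the low and high 32-bit halves
         	 * of the (little-endian) descriptor address field; on 32-bit
         	 * platforms the high half is simply zero.
         	 */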
   1713 	wa->wa_low = htole32(v & 0xffffffffU);
   1714 	if (sizeof(bus_addr_t) == 8)
   1715 		wa->wa_high = htole32((uint64_t) v >> 32);
   1716 	else
   1717 		wa->wa_high = 0;
   1718 }
   1719 
   1720 /*
   1721  * Descriptor sync/init functions.
   1722  */
   1723 static inline void
   1724 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1725 {
   1726 	struct wm_softc *sc = txq->txq_sc;
   1727 
   1728 	/* If it will wrap around, sync to the end of the ring. */
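         	/*
         	 * For example, in a 256-descriptor ring, start = 250 and
         	 * num = 10 syncs descriptors 250-255 here and then falls
         	 * through to sync descriptors 0-3 below.
         	 */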
   1729 	if ((start + num) > WM_NTXDESC(txq)) {
   1730 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1731 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1732 		    (WM_NTXDESC(txq) - start), ops);
   1733 		num -= (WM_NTXDESC(txq) - start);
   1734 		start = 0;
   1735 	}
   1736 
   1737 	/* Now sync whatever is left. */
   1738 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1739 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1740 }
   1741 
   1742 static inline void
   1743 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1744 {
   1745 	struct wm_softc *sc = rxq->rxq_sc;
   1746 
   1747 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1748 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1749 }
   1750 
   1751 static inline void
   1752 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1753 {
   1754 	struct wm_softc *sc = rxq->rxq_sc;
   1755 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1756 	struct mbuf *m = rxs->rxs_mbuf;
   1757 
   1758 	/*
   1759 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1760 	 * so that the payload after the Ethernet header is aligned
   1761 	 * to a 4-byte boundary.
    1762 	 *
   1763 	 * XXX BRAINDAMAGE ALERT!
   1764 	 * The stupid chip uses the same size for every buffer, which
   1765 	 * is set in the Receive Control register.  We are using the 2K
   1766 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1767 	 * reason, we can't "scoot" packets longer than the standard
   1768 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1769 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1770 	 * the upper layer copy the headers.
   1771 	 */
   1772 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1773 
   1774 	if (sc->sc_type == WM_T_82574) {
   1775 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1776 		rxd->erx_data.erxd_addr =
   1777 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1778 		rxd->erx_data.erxd_dd = 0;
   1779 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1780 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1781 
   1782 		rxd->nqrx_data.nrxd_paddr =
   1783 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1784 		/* Currently, split header is not supported. */
   1785 		rxd->nqrx_data.nrxd_haddr = 0;
   1786 	} else {
   1787 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1788 
   1789 		wm_set_dma_addr(&rxd->wrx_addr,
   1790 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1791 		rxd->wrx_len = 0;
   1792 		rxd->wrx_cksum = 0;
   1793 		rxd->wrx_status = 0;
   1794 		rxd->wrx_errors = 0;
   1795 		rxd->wrx_special = 0;
   1796 	}
   1797 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1798 
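         	/*
         	 * Tell the hardware which descriptors are available by
         	 * updating the ring's tail pointer (RDT).
         	 */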
   1799 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1800 }
   1801 
   1802 /*
   1803  * Device driver interface functions and commonly used functions.
   1804  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1805  */
   1806 
    1807 /* Look up a device in the supported device table */
   1808 static const struct wm_product *
   1809 wm_lookup(const struct pci_attach_args *pa)
   1810 {
   1811 	const struct wm_product *wmp;
   1812 
   1813 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1814 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1815 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1816 			return wmp;
   1817 	}
   1818 	return NULL;
   1819 }
   1820 
   1821 /* The match function (ca_match) */
   1822 static int
   1823 wm_match(device_t parent, cfdata_t cf, void *aux)
   1824 {
   1825 	struct pci_attach_args *pa = aux;
   1826 
   1827 	if (wm_lookup(pa) != NULL)
   1828 		return 1;
   1829 
   1830 	return 0;
   1831 }
   1832 
   1833 /* The attach function (ca_attach) */
   1834 static void
   1835 wm_attach(device_t parent, device_t self, void *aux)
   1836 {
   1837 	struct wm_softc *sc = device_private(self);
   1838 	struct pci_attach_args *pa = aux;
   1839 	prop_dictionary_t dict;
   1840 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1841 	pci_chipset_tag_t pc = pa->pa_pc;
   1842 	int counts[PCI_INTR_TYPE_SIZE];
   1843 	pci_intr_type_t max_type;
   1844 	const char *eetype, *xname;
   1845 	bus_space_tag_t memt;
   1846 	bus_space_handle_t memh;
   1847 	bus_size_t memsize;
   1848 	int memh_valid;
   1849 	int i, error;
   1850 	const struct wm_product *wmp;
   1851 	prop_data_t ea;
   1852 	prop_number_t pn;
   1853 	uint8_t enaddr[ETHER_ADDR_LEN];
   1854 	char buf[256];
   1855 	char wqname[MAXCOMLEN];
   1856 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1857 	pcireg_t preg, memtype;
   1858 	uint16_t eeprom_data, apme_mask;
   1859 	bool force_clear_smbi;
   1860 	uint32_t link_mode;
   1861 	uint32_t reg;
   1862 
   1863 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1864 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1865 #endif
   1866 	sc->sc_dev = self;
   1867 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1868 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1869 	sc->sc_core_stopping = false;
   1870 
   1871 	wmp = wm_lookup(pa);
   1872 #ifdef DIAGNOSTIC
   1873 	if (wmp == NULL) {
   1874 		printf("\n");
   1875 		panic("wm_attach: impossible");
   1876 	}
   1877 #endif
   1878 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1879 
   1880 	sc->sc_pc = pa->pa_pc;
   1881 	sc->sc_pcitag = pa->pa_tag;
   1882 
   1883 	if (pci_dma64_available(pa))
   1884 		sc->sc_dmat = pa->pa_dmat64;
   1885 	else
   1886 		sc->sc_dmat = pa->pa_dmat;
   1887 
   1888 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1889 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1890 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1891 
   1892 	sc->sc_type = wmp->wmp_type;
   1893 
   1894 	/* Set default function pointers */
   1895 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1896 	sc->phy.release = sc->nvm.release = wm_put_null;
   1897 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1898 
   1899 	if (sc->sc_type < WM_T_82543) {
   1900 		if (sc->sc_rev < 2) {
   1901 			aprint_error_dev(sc->sc_dev,
   1902 			    "i82542 must be at least rev. 2\n");
   1903 			return;
   1904 		}
   1905 		if (sc->sc_rev < 3)
   1906 			sc->sc_type = WM_T_82542_2_0;
   1907 	}
   1908 
   1909 	/*
   1910 	 * Disable MSI for Errata:
   1911 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1912 	 *
   1913 	 *  82544: Errata 25
   1914 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1915 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1916 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1917 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1918 	 *
   1919 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1920 	 *
   1921 	 *  82571 & 82572: Errata 63
   1922 	 */
   1923 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1924 	    || (sc->sc_type == WM_T_82572))
   1925 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1926 
   1927 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1928 	    || (sc->sc_type == WM_T_82580)
   1929 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1930 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1931 		sc->sc_flags |= WM_F_NEWQUEUE;
   1932 
   1933 	/* Set device properties (mactype) */
   1934 	dict = device_properties(sc->sc_dev);
   1935 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1936 
   1937 	/*
    1938 	 * Map the device.  All devices support memory-mapped access,
   1939 	 * and it is really required for normal operation.
   1940 	 */
   1941 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1942 	switch (memtype) {
   1943 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1944 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1945 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1946 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1947 		break;
   1948 	default:
   1949 		memh_valid = 0;
   1950 		break;
   1951 	}
   1952 
   1953 	if (memh_valid) {
   1954 		sc->sc_st = memt;
   1955 		sc->sc_sh = memh;
   1956 		sc->sc_ss = memsize;
   1957 	} else {
   1958 		aprint_error_dev(sc->sc_dev,
   1959 		    "unable to map device registers\n");
   1960 		return;
   1961 	}
   1962 
   1963 	/*
   1964 	 * In addition, i82544 and later support I/O mapped indirect
   1965 	 * register access.  It is not desirable (nor supported in
   1966 	 * this driver) to use it for normal operation, though it is
   1967 	 * required to work around bugs in some chip versions.
   1968 	 */
   1969 	if (sc->sc_type >= WM_T_82544) {
   1970 		/* First we have to find the I/O BAR. */
   1971 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1972 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1973 			if (memtype == PCI_MAPREG_TYPE_IO)
   1974 				break;
   1975 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1976 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1977 				i += 4;	/* skip high bits, too */
   1978 		}
   1979 		if (i < PCI_MAPREG_END) {
   1980 			/*
    1981 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1982 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1983 			 * That's not a problem, because the newer chips
    1984 			 * don't have this bug.
   1985 			 *
   1986 			 * The i8254x doesn't apparently respond when the
   1987 			 * I/O BAR is 0, which looks somewhat like it's not
   1988 			 * been configured.
   1989 			 */
   1990 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1991 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1992 				aprint_error_dev(sc->sc_dev,
   1993 				    "WARNING: I/O BAR at zero.\n");
   1994 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1995 					0, &sc->sc_iot, &sc->sc_ioh,
   1996 					NULL, &sc->sc_ios) == 0) {
   1997 				sc->sc_flags |= WM_F_IOH_VALID;
   1998 			} else
   1999 				aprint_error_dev(sc->sc_dev,
   2000 				    "WARNING: unable to map I/O space\n");
   2001 		}
   2002 
   2003 	}
   2004 
   2005 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2006 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2007 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2008 	if (sc->sc_type < WM_T_82542_2_1)
   2009 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2010 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2011 
   2012 	/* Power up chip */
   2013 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2014 	    && error != EOPNOTSUPP) {
   2015 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2016 		return;
   2017 	}
   2018 
   2019 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2020 	/*
    2021 	 * Don't use MSI-X if we can use only one queue, to save
    2022 	 * interrupt resources.
   2023 	 */
   2024 	if (sc->sc_nqueues > 1) {
   2025 		max_type = PCI_INTR_TYPE_MSIX;
   2026 		/*
    2027 		 * The 82583 has an MSI-X capability in its PCI
    2028 		 * configuration space, but doesn't actually support
    2029 		 * MSI-X; at least the documentation says nothing about it.
   2030 		 */
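         		/* One vector per queue, plus one for link interrupts. */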
   2031 		counts[PCI_INTR_TYPE_MSIX]
   2032 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2033 	} else {
   2034 		max_type = PCI_INTR_TYPE_MSI;
   2035 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2036 	}
   2037 
   2038 	/* Allocation settings */
   2039 	counts[PCI_INTR_TYPE_MSI] = 1;
   2040 	counts[PCI_INTR_TYPE_INTX] = 1;
    2041 	/* Overridden by the disable flags */
   2042 	if (wm_disable_msi != 0) {
   2043 		counts[PCI_INTR_TYPE_MSI] = 0;
   2044 		if (wm_disable_msix != 0) {
   2045 			max_type = PCI_INTR_TYPE_INTX;
   2046 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2047 		}
   2048 	} else if (wm_disable_msix != 0) {
   2049 		max_type = PCI_INTR_TYPE_MSI;
   2050 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2051 	}
   2052 
   2053 alloc_retry:
   2054 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2055 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2056 		return;
   2057 	}
   2058 
   2059 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2060 		error = wm_setup_msix(sc);
   2061 		if (error) {
   2062 			pci_intr_release(pc, sc->sc_intrs,
   2063 			    counts[PCI_INTR_TYPE_MSIX]);
   2064 
   2065 			/* Setup for MSI: Disable MSI-X */
   2066 			max_type = PCI_INTR_TYPE_MSI;
   2067 			counts[PCI_INTR_TYPE_MSI] = 1;
   2068 			counts[PCI_INTR_TYPE_INTX] = 1;
   2069 			goto alloc_retry;
   2070 		}
   2071 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2072 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2073 		error = wm_setup_legacy(sc);
   2074 		if (error) {
   2075 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2076 			    counts[PCI_INTR_TYPE_MSI]);
   2077 
   2078 			/* The next try is for INTx: Disable MSI */
   2079 			max_type = PCI_INTR_TYPE_INTX;
   2080 			counts[PCI_INTR_TYPE_INTX] = 1;
   2081 			goto alloc_retry;
   2082 		}
   2083 	} else {
   2084 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2085 		error = wm_setup_legacy(sc);
   2086 		if (error) {
   2087 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2088 			    counts[PCI_INTR_TYPE_INTX]);
   2089 			return;
   2090 		}
   2091 	}
   2092 
   2093 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2094 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2095 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2096 	    WM_WORKQUEUE_FLAGS);
   2097 	if (error) {
   2098 		aprint_error_dev(sc->sc_dev,
   2099 		    "unable to create workqueue\n");
   2100 		goto out;
   2101 	}
   2102 
   2103 	/*
   2104 	 * Check the function ID (unit number of the chip).
   2105 	 */
   2106 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2107 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2108 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2109 	    || (sc->sc_type == WM_T_82580)
   2110 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2111 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2112 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2113 	else
   2114 		sc->sc_funcid = 0;
   2115 
   2116 	/*
   2117 	 * Determine a few things about the bus we're connected to.
   2118 	 */
   2119 	if (sc->sc_type < WM_T_82543) {
   2120 		/* We don't really know the bus characteristics here. */
   2121 		sc->sc_bus_speed = 33;
   2122 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2123 		/*
   2124 		 * CSA (Communication Streaming Architecture) is about as fast
    2125 		 * as a 32-bit 66MHz PCI bus.
   2126 		 */
   2127 		sc->sc_flags |= WM_F_CSA;
   2128 		sc->sc_bus_speed = 66;
   2129 		aprint_verbose_dev(sc->sc_dev,
   2130 		    "Communication Streaming Architecture\n");
   2131 		if (sc->sc_type == WM_T_82547) {
   2132 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2133 			callout_setfunc(&sc->sc_txfifo_ch,
   2134 			    wm_82547_txfifo_stall, sc);
   2135 			aprint_verbose_dev(sc->sc_dev,
   2136 			    "using 82547 Tx FIFO stall work-around\n");
   2137 		}
   2138 	} else if (sc->sc_type >= WM_T_82571) {
   2139 		sc->sc_flags |= WM_F_PCIE;
   2140 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2141 		    && (sc->sc_type != WM_T_ICH10)
   2142 		    && (sc->sc_type != WM_T_PCH)
   2143 		    && (sc->sc_type != WM_T_PCH2)
   2144 		    && (sc->sc_type != WM_T_PCH_LPT)
   2145 		    && (sc->sc_type != WM_T_PCH_SPT)
   2146 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2147 			/* ICH* and PCH* have no PCIe capability registers */
   2148 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2149 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2150 				NULL) == 0)
   2151 				aprint_error_dev(sc->sc_dev,
   2152 				    "unable to find PCIe capability\n");
   2153 		}
   2154 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2155 	} else {
   2156 		reg = CSR_READ(sc, WMREG_STATUS);
   2157 		if (reg & STATUS_BUS64)
   2158 			sc->sc_flags |= WM_F_BUS64;
   2159 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2160 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2161 
   2162 			sc->sc_flags |= WM_F_PCIX;
   2163 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2164 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2165 				aprint_error_dev(sc->sc_dev,
   2166 				    "unable to find PCIX capability\n");
   2167 			else if (sc->sc_type != WM_T_82545_3 &&
   2168 				 sc->sc_type != WM_T_82546_3) {
   2169 				/*
   2170 				 * Work around a problem caused by the BIOS
   2171 				 * setting the max memory read byte count
   2172 				 * incorrectly.
   2173 				 */
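         				/*
         				 * The MMRBC field encodes 512 << n
         				 * bytes; e.g. a BIOS value of 4096
         				 * (n = 3) on a bus allowing only 2048
         				 * (maxb = 2) is clamped down to maxb.
         				 */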
   2174 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2175 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2176 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2177 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2178 
   2179 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2180 				    PCIX_CMD_BYTECNT_SHIFT;
   2181 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2182 				    PCIX_STATUS_MAXB_SHIFT;
   2183 				if (bytecnt > maxb) {
   2184 					aprint_verbose_dev(sc->sc_dev,
   2185 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2186 					    512 << bytecnt, 512 << maxb);
   2187 					pcix_cmd = (pcix_cmd &
   2188 					    ~PCIX_CMD_BYTECNT_MASK) |
   2189 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2190 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2191 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2192 					    pcix_cmd);
   2193 				}
   2194 			}
   2195 		}
   2196 		/*
   2197 		 * The quad port adapter is special; it has a PCIX-PCIX
   2198 		 * bridge on the board, and can run the secondary bus at
   2199 		 * a higher speed.
   2200 		 */
   2201 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2202 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2203 								      : 66;
   2204 		} else if (sc->sc_flags & WM_F_PCIX) {
   2205 			switch (reg & STATUS_PCIXSPD_MASK) {
   2206 			case STATUS_PCIXSPD_50_66:
   2207 				sc->sc_bus_speed = 66;
   2208 				break;
   2209 			case STATUS_PCIXSPD_66_100:
   2210 				sc->sc_bus_speed = 100;
   2211 				break;
   2212 			case STATUS_PCIXSPD_100_133:
   2213 				sc->sc_bus_speed = 133;
   2214 				break;
   2215 			default:
   2216 				aprint_error_dev(sc->sc_dev,
   2217 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2218 				    reg & STATUS_PCIXSPD_MASK);
   2219 				sc->sc_bus_speed = 66;
   2220 				break;
   2221 			}
   2222 		} else
   2223 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2224 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2225 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2226 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2227 	}
   2228 
    2229 	/* Clear interesting stat counters (they are cleared on read) */
   2230 	CSR_READ(sc, WMREG_COLC);
   2231 	CSR_READ(sc, WMREG_RXERRC);
   2232 
   2233 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2234 	    || (sc->sc_type >= WM_T_ICH8))
   2235 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2236 	if (sc->sc_type >= WM_T_ICH8)
   2237 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2238 
   2239 	/* Set PHY, NVM mutex related stuff */
   2240 	switch (sc->sc_type) {
   2241 	case WM_T_82542_2_0:
   2242 	case WM_T_82542_2_1:
   2243 	case WM_T_82543:
   2244 	case WM_T_82544:
   2245 		/* Microwire */
   2246 		sc->nvm.read = wm_nvm_read_uwire;
   2247 		sc->sc_nvm_wordsize = 64;
   2248 		sc->sc_nvm_addrbits = 6;
   2249 		break;
   2250 	case WM_T_82540:
   2251 	case WM_T_82545:
   2252 	case WM_T_82545_3:
   2253 	case WM_T_82546:
   2254 	case WM_T_82546_3:
   2255 		/* Microwire */
   2256 		sc->nvm.read = wm_nvm_read_uwire;
   2257 		reg = CSR_READ(sc, WMREG_EECD);
   2258 		if (reg & EECD_EE_SIZE) {
   2259 			sc->sc_nvm_wordsize = 256;
   2260 			sc->sc_nvm_addrbits = 8;
   2261 		} else {
   2262 			sc->sc_nvm_wordsize = 64;
   2263 			sc->sc_nvm_addrbits = 6;
   2264 		}
   2265 		sc->sc_flags |= WM_F_LOCK_EECD;
   2266 		sc->nvm.acquire = wm_get_eecd;
   2267 		sc->nvm.release = wm_put_eecd;
   2268 		break;
   2269 	case WM_T_82541:
   2270 	case WM_T_82541_2:
   2271 	case WM_T_82547:
   2272 	case WM_T_82547_2:
   2273 		reg = CSR_READ(sc, WMREG_EECD);
   2274 		/*
    2275 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2276 		 * 8254[17], so set the flags and functions before calling it.
   2277 		 */
   2278 		sc->sc_flags |= WM_F_LOCK_EECD;
   2279 		sc->nvm.acquire = wm_get_eecd;
   2280 		sc->nvm.release = wm_put_eecd;
   2281 		if (reg & EECD_EE_TYPE) {
   2282 			/* SPI */
   2283 			sc->nvm.read = wm_nvm_read_spi;
   2284 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2285 			wm_nvm_set_addrbits_size_eecd(sc);
   2286 		} else {
   2287 			/* Microwire */
   2288 			sc->nvm.read = wm_nvm_read_uwire;
   2289 			if ((reg & EECD_EE_ABITS) != 0) {
   2290 				sc->sc_nvm_wordsize = 256;
   2291 				sc->sc_nvm_addrbits = 8;
   2292 			} else {
   2293 				sc->sc_nvm_wordsize = 64;
   2294 				sc->sc_nvm_addrbits = 6;
   2295 			}
   2296 		}
   2297 		break;
   2298 	case WM_T_82571:
   2299 	case WM_T_82572:
   2300 		/* SPI */
   2301 		sc->nvm.read = wm_nvm_read_eerd;
    2302 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2303 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2304 		wm_nvm_set_addrbits_size_eecd(sc);
   2305 		sc->phy.acquire = wm_get_swsm_semaphore;
   2306 		sc->phy.release = wm_put_swsm_semaphore;
   2307 		sc->nvm.acquire = wm_get_nvm_82571;
   2308 		sc->nvm.release = wm_put_nvm_82571;
   2309 		break;
   2310 	case WM_T_82573:
   2311 	case WM_T_82574:
   2312 	case WM_T_82583:
   2313 		sc->nvm.read = wm_nvm_read_eerd;
    2314 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2315 		if (sc->sc_type == WM_T_82573) {
   2316 			sc->phy.acquire = wm_get_swsm_semaphore;
   2317 			sc->phy.release = wm_put_swsm_semaphore;
   2318 			sc->nvm.acquire = wm_get_nvm_82571;
   2319 			sc->nvm.release = wm_put_nvm_82571;
   2320 		} else {
   2321 			/* Both PHY and NVM use the same semaphore. */
   2322 			sc->phy.acquire = sc->nvm.acquire
   2323 			    = wm_get_swfwhw_semaphore;
   2324 			sc->phy.release = sc->nvm.release
   2325 			    = wm_put_swfwhw_semaphore;
   2326 		}
   2327 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2328 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2329 			sc->sc_nvm_wordsize = 2048;
   2330 		} else {
   2331 			/* SPI */
   2332 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2333 			wm_nvm_set_addrbits_size_eecd(sc);
   2334 		}
   2335 		break;
   2336 	case WM_T_82575:
   2337 	case WM_T_82576:
   2338 	case WM_T_82580:
   2339 	case WM_T_I350:
   2340 	case WM_T_I354:
   2341 	case WM_T_80003:
   2342 		/* SPI */
   2343 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2344 		wm_nvm_set_addrbits_size_eecd(sc);
   2345 		if ((sc->sc_type == WM_T_80003)
   2346 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2347 			sc->nvm.read = wm_nvm_read_eerd;
   2348 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2349 		} else {
   2350 			sc->nvm.read = wm_nvm_read_spi;
   2351 			sc->sc_flags |= WM_F_LOCK_EECD;
   2352 		}
   2353 		sc->phy.acquire = wm_get_phy_82575;
   2354 		sc->phy.release = wm_put_phy_82575;
   2355 		sc->nvm.acquire = wm_get_nvm_80003;
   2356 		sc->nvm.release = wm_put_nvm_80003;
   2357 		break;
   2358 	case WM_T_ICH8:
   2359 	case WM_T_ICH9:
   2360 	case WM_T_ICH10:
   2361 	case WM_T_PCH:
   2362 	case WM_T_PCH2:
   2363 	case WM_T_PCH_LPT:
   2364 		sc->nvm.read = wm_nvm_read_ich8;
   2365 		/* FLASH */
   2366 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2367 		sc->sc_nvm_wordsize = 2048;
   2368 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2369 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2370 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2371 			aprint_error_dev(sc->sc_dev,
   2372 			    "can't map FLASH registers\n");
   2373 			goto out;
   2374 		}
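         		/*
         		 * GFPREG holds the first and last sectors of the flash
         		 * region holding the NVM.  Convert the base to a byte
         		 * offset and the sector count to bytes, then halve it
         		 * (the region contains two NVM banks) and convert the
         		 * result to 16-bit words.
         		 */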
   2375 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2376 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2377 		    ICH_FLASH_SECTOR_SIZE;
   2378 		sc->sc_ich8_flash_bank_size =
   2379 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2380 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2381 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2382 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2383 		sc->sc_flashreg_offset = 0;
   2384 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2385 		sc->phy.release = wm_put_swflag_ich8lan;
   2386 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2387 		sc->nvm.release = wm_put_nvm_ich8lan;
   2388 		break;
   2389 	case WM_T_PCH_SPT:
   2390 	case WM_T_PCH_CNP:
   2391 		sc->nvm.read = wm_nvm_read_spt;
   2392 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2393 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2394 		sc->sc_flasht = sc->sc_st;
   2395 		sc->sc_flashh = sc->sc_sh;
   2396 		sc->sc_ich8_flash_base = 0;
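         		/*
         		 * The flash size strap is in STRAP bits 5:1, encoded
         		 * as (size / NVM_SIZE_MULTIPLIER) - 1.
         		 */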
   2397 		sc->sc_nvm_wordsize =
   2398 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2399 		    * NVM_SIZE_MULTIPLIER;
    2400 		/* It is the size in bytes; we want words */
   2401 		sc->sc_nvm_wordsize /= 2;
   2402 		/* Assume 2 banks */
   2403 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2404 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2405 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2406 		sc->phy.release = wm_put_swflag_ich8lan;
   2407 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2408 		sc->nvm.release = wm_put_nvm_ich8lan;
   2409 		break;
   2410 	case WM_T_I210:
   2411 	case WM_T_I211:
    2412 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2413 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2414 		if (wm_nvm_flash_presence_i210(sc)) {
   2415 			sc->nvm.read = wm_nvm_read_eerd;
   2416 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2417 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2418 			wm_nvm_set_addrbits_size_eecd(sc);
   2419 		} else {
   2420 			sc->nvm.read = wm_nvm_read_invm;
   2421 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2422 			sc->sc_nvm_wordsize = INVM_SIZE;
   2423 		}
   2424 		sc->phy.acquire = wm_get_phy_82575;
   2425 		sc->phy.release = wm_put_phy_82575;
   2426 		sc->nvm.acquire = wm_get_nvm_80003;
   2427 		sc->nvm.release = wm_put_nvm_80003;
   2428 		break;
   2429 	default:
   2430 		break;
   2431 	}
   2432 
   2433 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2434 	switch (sc->sc_type) {
   2435 	case WM_T_82571:
   2436 	case WM_T_82572:
   2437 		reg = CSR_READ(sc, WMREG_SWSM2);
   2438 		if ((reg & SWSM2_LOCK) == 0) {
   2439 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2440 			force_clear_smbi = true;
   2441 		} else
   2442 			force_clear_smbi = false;
   2443 		break;
   2444 	case WM_T_82573:
   2445 	case WM_T_82574:
   2446 	case WM_T_82583:
   2447 		force_clear_smbi = true;
   2448 		break;
   2449 	default:
   2450 		force_clear_smbi = false;
   2451 		break;
   2452 	}
   2453 	if (force_clear_smbi) {
   2454 		reg = CSR_READ(sc, WMREG_SWSM);
   2455 		if ((reg & SWSM_SMBI) != 0)
   2456 			aprint_error_dev(sc->sc_dev,
   2457 			    "Please update the Bootagent\n");
   2458 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2459 	}
   2460 
   2461 	/*
   2462 	 * Defer printing the EEPROM type until after verifying the checksum
   2463 	 * This allows the EEPROM type to be printed correctly in the case
   2464 	 * that no EEPROM is attached.
   2465 	 */
   2466 	/*
   2467 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2468 	 * this for later, so we can fail future reads from the EEPROM.
   2469 	 */
   2470 	if (wm_nvm_validate_checksum(sc)) {
   2471 		/*
    2472 		 * Validate a second time, because some PCIe parts fail
    2473 		 * the first check due to the link being in a sleep state.
   2474 		 */
   2475 		if (wm_nvm_validate_checksum(sc))
   2476 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2477 	}
   2478 
   2479 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2480 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2481 	else {
   2482 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2483 		    sc->sc_nvm_wordsize);
   2484 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2485 			aprint_verbose("iNVM");
   2486 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2487 			aprint_verbose("FLASH(HW)");
   2488 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2489 			aprint_verbose("FLASH");
   2490 		else {
   2491 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2492 				eetype = "SPI";
   2493 			else
   2494 				eetype = "MicroWire";
   2495 			aprint_verbose("(%d address bits) %s EEPROM",
   2496 			    sc->sc_nvm_addrbits, eetype);
   2497 		}
   2498 	}
   2499 	wm_nvm_version(sc);
   2500 	aprint_verbose("\n");
   2501 
   2502 	/*
    2503 	 * XXX This is the first call of wm_gmii_setup_phytype(). The
    2504 	 * result might be incorrect.
   2505 	 */
   2506 	wm_gmii_setup_phytype(sc, 0, 0);
   2507 
   2508 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2509 	switch (sc->sc_type) {
   2510 	case WM_T_ICH8:
   2511 	case WM_T_ICH9:
   2512 	case WM_T_ICH10:
   2513 	case WM_T_PCH:
   2514 	case WM_T_PCH2:
   2515 	case WM_T_PCH_LPT:
   2516 	case WM_T_PCH_SPT:
   2517 	case WM_T_PCH_CNP:
   2518 		apme_mask = WUC_APME;
   2519 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2520 		if ((eeprom_data & apme_mask) != 0)
   2521 			sc->sc_flags |= WM_F_WOL;
   2522 		break;
   2523 	default:
   2524 		break;
   2525 	}
   2526 
   2527 	/* Reset the chip to a known state. */
   2528 	wm_reset(sc);
   2529 
   2530 	/*
   2531 	 * Check for I21[01] PLL workaround.
   2532 	 *
   2533 	 * Three cases:
   2534 	 * a) Chip is I211.
   2535 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2536 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2537 	 */
   2538 	if (sc->sc_type == WM_T_I211)
   2539 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2540 	if (sc->sc_type == WM_T_I210) {
   2541 		if (!wm_nvm_flash_presence_i210(sc))
   2542 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2543 		else if ((sc->sc_nvm_ver_major < 3)
   2544 		    || ((sc->sc_nvm_ver_major == 3)
   2545 			&& (sc->sc_nvm_ver_minor < 25))) {
   2546 			aprint_verbose_dev(sc->sc_dev,
   2547 			    "ROM image version %d.%d is older than 3.25\n",
   2548 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2549 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2550 		}
   2551 	}
   2552 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2553 		wm_pll_workaround_i210(sc);
   2554 
   2555 	wm_get_wakeup(sc);
   2556 
   2557 	/* Non-AMT based hardware can now take control from firmware */
   2558 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2559 		wm_get_hw_control(sc);
   2560 
   2561 	/*
    2562 	 * Read the Ethernet address from the EEPROM, unless it was
    2563 	 * already found in the device properties.
   2564 	 */
   2565 	ea = prop_dictionary_get(dict, "mac-address");
   2566 	if (ea != NULL) {
   2567 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2568 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2569 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2570 	} else {
   2571 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2572 			aprint_error_dev(sc->sc_dev,
   2573 			    "unable to read Ethernet address\n");
   2574 			goto out;
   2575 		}
   2576 	}
   2577 
   2578 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2579 	    ether_sprintf(enaddr));
   2580 
   2581 	/*
   2582 	 * Read the config info from the EEPROM, and set up various
   2583 	 * bits in the control registers based on their contents.
   2584 	 */
   2585 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2586 	if (pn != NULL) {
   2587 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2588 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2589 	} else {
   2590 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2591 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2592 			goto out;
   2593 		}
   2594 	}
   2595 
   2596 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2597 	if (pn != NULL) {
   2598 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2599 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2600 	} else {
   2601 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2602 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2603 			goto out;
   2604 		}
   2605 	}
   2606 
   2607 	/* check for WM_F_WOL */
   2608 	switch (sc->sc_type) {
   2609 	case WM_T_82542_2_0:
   2610 	case WM_T_82542_2_1:
   2611 	case WM_T_82543:
   2612 		/* dummy? */
   2613 		eeprom_data = 0;
   2614 		apme_mask = NVM_CFG3_APME;
   2615 		break;
   2616 	case WM_T_82544:
   2617 		apme_mask = NVM_CFG2_82544_APM_EN;
   2618 		eeprom_data = cfg2;
   2619 		break;
   2620 	case WM_T_82546:
   2621 	case WM_T_82546_3:
   2622 	case WM_T_82571:
   2623 	case WM_T_82572:
   2624 	case WM_T_82573:
   2625 	case WM_T_82574:
   2626 	case WM_T_82583:
   2627 	case WM_T_80003:
   2628 	case WM_T_82575:
   2629 	case WM_T_82576:
   2630 		apme_mask = NVM_CFG3_APME;
   2631 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2632 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2633 		break;
   2634 	case WM_T_82580:
   2635 	case WM_T_I350:
   2636 	case WM_T_I354:
   2637 	case WM_T_I210:
   2638 	case WM_T_I211:
   2639 		apme_mask = NVM_CFG3_APME;
   2640 		wm_nvm_read(sc,
   2641 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2642 		    1, &eeprom_data);
   2643 		break;
   2644 	case WM_T_ICH8:
   2645 	case WM_T_ICH9:
   2646 	case WM_T_ICH10:
   2647 	case WM_T_PCH:
   2648 	case WM_T_PCH2:
   2649 	case WM_T_PCH_LPT:
   2650 	case WM_T_PCH_SPT:
   2651 	case WM_T_PCH_CNP:
    2652 		/* Already checked before wm_reset() */
   2653 		apme_mask = eeprom_data = 0;
   2654 		break;
   2655 	default: /* XXX 82540 */
   2656 		apme_mask = NVM_CFG3_APME;
   2657 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2658 		break;
   2659 	}
    2660 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2661 	if ((eeprom_data & apme_mask) != 0)
   2662 		sc->sc_flags |= WM_F_WOL;
   2663 
   2664 	/*
    2665 	 * We have the EEPROM settings; now apply the special cases
    2666 	 * where the EEPROM may be wrong or the board won't support
    2667 	 * wake-on-LAN on a particular port.
   2668 	 */
   2669 	switch (sc->sc_pcidevid) {
   2670 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2671 		sc->sc_flags &= ~WM_F_WOL;
   2672 		break;
   2673 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2674 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2675 		/* Wake events only supported on port A for dual fiber
   2676 		 * regardless of eeprom setting */
   2677 		if (sc->sc_funcid == 1)
   2678 			sc->sc_flags &= ~WM_F_WOL;
   2679 		break;
   2680 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2681 		/* If quad port adapter, disable WoL on all but port A */
   2682 		if (sc->sc_funcid != 0)
   2683 			sc->sc_flags &= ~WM_F_WOL;
   2684 		break;
   2685 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2686 		/* Wake events only supported on port A for dual fiber
   2687 		 * regardless of eeprom setting */
   2688 		if (sc->sc_funcid == 1)
   2689 			sc->sc_flags &= ~WM_F_WOL;
   2690 		break;
   2691 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2692 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2693 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2694 		/* If quad port adapter, disable WoL on all but port A */
   2695 		if (sc->sc_funcid != 0)
   2696 			sc->sc_flags &= ~WM_F_WOL;
   2697 		break;
   2698 	}
   2699 
   2700 	if (sc->sc_type >= WM_T_82575) {
   2701 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2702 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2703 			    nvmword);
   2704 			if ((sc->sc_type == WM_T_82575) ||
   2705 			    (sc->sc_type == WM_T_82576)) {
   2706 				/* Check NVM for autonegotiation */
   2707 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2708 				    != 0)
   2709 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2710 			}
   2711 			if ((sc->sc_type == WM_T_82575) ||
   2712 			    (sc->sc_type == WM_T_I350)) {
   2713 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2714 					sc->sc_flags |= WM_F_MAS;
   2715 			}
   2716 		}
   2717 	}
   2718 
   2719 	/*
    2720 	 * XXX Need special handling for some multi-port cards
    2721 	 * to disable a particular port.
   2722 	 */
   2723 
   2724 	if (sc->sc_type >= WM_T_82544) {
   2725 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2726 		if (pn != NULL) {
   2727 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2728 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2729 		} else {
   2730 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2731 				aprint_error_dev(sc->sc_dev,
   2732 				    "unable to read SWDPIN\n");
   2733 				goto out;
   2734 			}
   2735 		}
   2736 	}
   2737 
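        	/* CTRL_ILOS inverts the loss-of-signal (LOS) input polarity. */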
   2738 	if (cfg1 & NVM_CFG1_ILOS)
   2739 		sc->sc_ctrl |= CTRL_ILOS;
   2740 
   2741 	/*
   2742 	 * XXX
   2743 	 * This code isn't correct because pins 2 and 3 are located
   2744 	 * at different positions on newer chips. Check all datasheets.
   2745 	 *
   2746 	 * Until this problem is resolved, only handle chips <= 82580.
   2747 	 */
   2748 	if (sc->sc_type <= WM_T_82580) {
   2749 		if (sc->sc_type >= WM_T_82544) {
   2750 			sc->sc_ctrl |=
   2751 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2752 			    CTRL_SWDPIO_SHIFT;
   2753 			sc->sc_ctrl |=
   2754 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2755 			    CTRL_SWDPINS_SHIFT;
   2756 		} else {
   2757 			sc->sc_ctrl |=
   2758 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2759 			    CTRL_SWDPIO_SHIFT;
   2760 		}
   2761 	}
   2762 
   2763 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2764 		wm_nvm_read(sc,
   2765 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2766 		    1, &nvmword);
   2767 		if (nvmword & NVM_CFG3_ILOS)
   2768 			sc->sc_ctrl |= CTRL_ILOS;
   2769 	}
   2770 
   2771 #if 0
   2772 	if (sc->sc_type >= WM_T_82544) {
   2773 		if (cfg1 & NVM_CFG1_IPS0)
   2774 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2775 		if (cfg1 & NVM_CFG1_IPS1)
   2776 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2777 		sc->sc_ctrl_ext |=
   2778 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2779 		    CTRL_EXT_SWDPIO_SHIFT;
   2780 		sc->sc_ctrl_ext |=
   2781 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2782 		    CTRL_EXT_SWDPINS_SHIFT;
   2783 	} else {
   2784 		sc->sc_ctrl_ext |=
   2785 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2786 		    CTRL_EXT_SWDPIO_SHIFT;
   2787 	}
   2788 #endif
   2789 
   2790 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2791 #if 0
   2792 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2793 #endif
   2794 
   2795 	if (sc->sc_type == WM_T_PCH) {
   2796 		uint16_t val;
   2797 
   2798 		/* Save the NVM K1 bit setting */
   2799 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2800 
   2801 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2802 			sc->sc_nvm_k1_enabled = 1;
   2803 		else
   2804 			sc->sc_nvm_k1_enabled = 0;
   2805 	}
   2806 
   2807 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2808 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2809 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2810 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2811 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2812 	    || sc->sc_type == WM_T_82573
   2813 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2814 		/* Copper only */
   2815 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2816 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
   2817 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
   2818 	    || (sc->sc_type == WM_T_I211)) {
   2819 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2820 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2821 		switch (link_mode) {
   2822 		case CTRL_EXT_LINK_MODE_1000KX:
   2823 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2824 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2825 			break;
   2826 		case CTRL_EXT_LINK_MODE_SGMII:
   2827 			if (wm_sgmii_uses_mdio(sc)) {
   2828 				aprint_normal_dev(sc->sc_dev,
   2829 				    "SGMII(MDIO)\n");
   2830 				sc->sc_flags |= WM_F_SGMII;
   2831 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2832 				break;
   2833 			}
   2834 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2835 			/*FALLTHROUGH*/
   2836 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2837 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2838 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2839 				if (link_mode
   2840 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2841 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2842 					sc->sc_flags |= WM_F_SGMII;
   2843 					aprint_verbose_dev(sc->sc_dev,
   2844 					    "SGMII\n");
   2845 				} else {
   2846 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2847 					aprint_verbose_dev(sc->sc_dev,
   2848 					    "SERDES\n");
   2849 				}
   2850 				break;
   2851 			}
   2852 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2853 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2854 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2855 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2856 				sc->sc_flags |= WM_F_SGMII;
   2857 			}
   2858 			/* Do not change link mode for 100BaseFX */
   2859 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2860 				break;
   2861 
   2862 			/* Change current link mode setting */
   2863 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2864 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2865 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2866 			else
   2867 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2868 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2869 			break;
   2870 		case CTRL_EXT_LINK_MODE_GMII:
   2871 		default:
   2872 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2873 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2874 			break;
   2875 		}
   2876 
   2878 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2879 			reg |= CTRL_EXT_I2C_ENA;
   2880 		else
   2881 			reg &= ~CTRL_EXT_I2C_ENA;
   2882 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2883 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2884 			if (!wm_sgmii_uses_mdio(sc))
   2885 				wm_gmii_setup_phytype(sc, 0, 0);
   2886 			wm_reset_mdicnfg_82580(sc);
   2887 		}
   2888 	} else if (sc->sc_type < WM_T_82543 ||
   2889 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2890 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2891 			aprint_error_dev(sc->sc_dev,
   2892 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2893 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2894 		}
   2895 	} else {
   2896 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2897 			aprint_error_dev(sc->sc_dev,
   2898 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2899 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2900 		}
   2901 	}
   2902 
   2903 	if (sc->sc_type >= WM_T_PCH2)
   2904 		sc->sc_flags |= WM_F_EEE;
   2905 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2906 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2907 		/* XXX: Need special handling for I354. (not yet) */
   2908 		if (sc->sc_type != WM_T_I354)
   2909 			sc->sc_flags |= WM_F_EEE;
   2910 	}
   2911 
   2912 	/*
   2913 	 * The I350 has a bug where it always strips the CRC whether
   2914 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   2915 	 */
   2916 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2917 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2918 		sc->sc_flags |= WM_F_CRC_STRIP;
   2919 
   2920 	/* Set device properties (macflags) */
   2921 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2922 
   2923 	if (sc->sc_flags != 0) {
   2924 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2925 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2926 	}
   2927 
   2928 #ifdef WM_MPSAFE
   2929 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2930 #else
   2931 	sc->sc_core_lock = NULL;
   2932 #endif
   2933 
   2934 	/* Initialize the media structures accordingly. */
   2935 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2936 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2937 	else
   2938 		wm_tbi_mediainit(sc); /* All others */
   2939 
   2940 	ifp = &sc->sc_ethercom.ec_if;
   2941 	xname = device_xname(sc->sc_dev);
   2942 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2943 	ifp->if_softc = sc;
   2944 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2945 #ifdef WM_MPSAFE
   2946 	ifp->if_extflags = IFEF_MPSAFE;
   2947 #endif
   2948 	ifp->if_ioctl = wm_ioctl;
   2949 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2950 		ifp->if_start = wm_nq_start;
   2951 		/*
   2952 		 * When the number of CPUs is one and the controller can use
   2953 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
   2954 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
   2955 		 * other for link status changes.
   2956 		 * In this situation, wm_nq_transmit() is disadvantageous
   2957 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2958 		 */
   2959 		if (wm_is_using_multiqueue(sc))
   2960 			ifp->if_transmit = wm_nq_transmit;
   2961 	} else {
   2962 		ifp->if_start = wm_start;
   2963 		/*
   2964 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2965 		 */
   2966 		if (wm_is_using_multiqueue(sc))
   2967 			ifp->if_transmit = wm_transmit;
   2968 	}
   2969 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2970 	ifp->if_init = wm_init;
   2971 	ifp->if_stop = wm_stop;
   2972 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2973 	IFQ_SET_READY(&ifp->if_snd);
   2974 
   2975 	/* Check for jumbo frame */
   2976 	switch (sc->sc_type) {
   2977 	case WM_T_82573:
   2978 		/* XXX limited to 9234 if ASPM is disabled */
   2979 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2980 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2981 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2982 		break;
   2983 	case WM_T_82571:
   2984 	case WM_T_82572:
   2985 	case WM_T_82574:
   2986 	case WM_T_82583:
   2987 	case WM_T_82575:
   2988 	case WM_T_82576:
   2989 	case WM_T_82580:
   2990 	case WM_T_I350:
   2991 	case WM_T_I354:
   2992 	case WM_T_I210:
   2993 	case WM_T_I211:
   2994 	case WM_T_80003:
   2995 	case WM_T_ICH9:
   2996 	case WM_T_ICH10:
   2997 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2998 	case WM_T_PCH_LPT:
   2999 	case WM_T_PCH_SPT:
   3000 	case WM_T_PCH_CNP:
   3001 		/* XXX limited to 9234 */
   3002 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3003 		break;
   3004 	case WM_T_PCH:
   3005 		/* XXX limited to 4096 */
   3006 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3007 		break;
   3008 	case WM_T_82542_2_0:
   3009 	case WM_T_82542_2_1:
   3010 	case WM_T_ICH8:
   3011 		/* No support for jumbo frame */
   3012 		break;
   3013 	default:
   3014 		/* ETHER_MAX_LEN_JUMBO */
   3015 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3016 		break;
   3017 	}
   3018 
   3019 	/* If we're an i82543 or greater, we can support VLANs. */
   3020 	if (sc->sc_type >= WM_T_82543) {
   3021 		sc->sc_ethercom.ec_capabilities |=
   3022 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3023 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3024 	}
   3025 
   3026 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3027 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3028 
   3029 	/*
   3030 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
   3031 	 * on i82543 and later.
   3032 	 */
   3033 	if (sc->sc_type >= WM_T_82543) {
   3034 		ifp->if_capabilities |=
   3035 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3036 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3037 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3038 		    IFCAP_CSUM_TCPv6_Tx |
   3039 		    IFCAP_CSUM_UDPv6_Tx;
   3040 	}
   3041 
   3042 	/*
   3043 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3044 	 *
   3045 	 *	82541GI (8086:1076) ... no
   3046 	 *	82572EI (8086:10b9) ... yes
   3047 	 */
   3048 	if (sc->sc_type >= WM_T_82571) {
   3049 		ifp->if_capabilities |=
   3050 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3051 	}
   3052 
   3053 	/*
   3054 	 * If we're an i82544 or greater (except i82547), we can do
   3055 	 * TCP segmentation offload.
   3056 	 */
   3057 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3058 		ifp->if_capabilities |= IFCAP_TSOv4;
   3059 	}
   3060 
   3061 	if (sc->sc_type >= WM_T_82571) {
   3062 		ifp->if_capabilities |= IFCAP_TSOv6;
   3063 	}
   3064 
   3065 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3066 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3067 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3068 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3069 
   3070 	/* Attach the interface. */
   3071 	error = if_initialize(ifp);
   3072 	if (error != 0) {
   3073 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3074 		    error);
   3075 		return; /* Error */
   3076 	}
   3077 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3078 	ether_ifattach(ifp, enaddr);
   3079 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3080 	if_register(ifp);
   3081 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3082 	    RND_FLAG_DEFAULT);
   3083 
   3084 #ifdef WM_EVENT_COUNTERS
   3085 	/* Attach event counters. */
   3086 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3087 	    NULL, xname, "linkintr");
   3088 
   3089 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3090 	    NULL, xname, "tx_xoff");
   3091 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3092 	    NULL, xname, "tx_xon");
   3093 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3094 	    NULL, xname, "rx_xoff");
   3095 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3096 	    NULL, xname, "rx_xon");
   3097 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3098 	    NULL, xname, "rx_macctl");
   3099 #endif /* WM_EVENT_COUNTERS */
   3100 
   3101 	sc->sc_txrx_use_workqueue = false;
   3102 
   3103 	wm_init_sysctls(sc);
   3104 
   3105 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3106 		pmf_class_network_register(self, ifp);
   3107 	else
   3108 		aprint_error_dev(self, "couldn't establish power handler\n");
   3109 
   3110 	sc->sc_flags |= WM_F_ATTACHED;
   3111 out:
   3112 	return;
   3113 }
   3114 
   3115 /* The detach function (ca_detach) */
   3116 static int
   3117 wm_detach(device_t self, int flags __unused)
   3118 {
   3119 	struct wm_softc *sc = device_private(self);
   3120 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3121 	int i;
   3122 
   3123 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3124 		return 0;
   3125 
   3126 	/* Stop the interface. Callouts are stopped in wm_stop(). */
   3127 	wm_stop(ifp, 1);
   3128 
   3129 	pmf_device_deregister(self);
   3130 
   3131 	sysctl_teardown(&sc->sc_sysctllog);
   3132 
   3133 #ifdef WM_EVENT_COUNTERS
   3134 	evcnt_detach(&sc->sc_ev_linkintr);
   3135 
   3136 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3137 	evcnt_detach(&sc->sc_ev_tx_xon);
   3138 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3139 	evcnt_detach(&sc->sc_ev_rx_xon);
   3140 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3141 #endif /* WM_EVENT_COUNTERS */
   3142 
   3143 	rnd_detach_source(&sc->rnd_source);
   3144 
   3145 	/* Tell the firmware about the release */
   3146 	WM_CORE_LOCK(sc);
   3147 	wm_release_manageability(sc);
   3148 	wm_release_hw_control(sc);
   3149 	wm_enable_wakeup(sc);
   3150 	WM_CORE_UNLOCK(sc);
   3151 
   3152 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3153 
   3154 	ether_ifdetach(ifp);
   3155 	if_detach(ifp);
   3156 	if_percpuq_destroy(sc->sc_ipq);
   3157 
   3158 	/* Delete all remaining media. */
   3159 	ifmedia_fini(&sc->sc_mii.mii_media);
   3160 
   3161 	/* Unload RX dmamaps and free mbufs */
   3162 	for (i = 0; i < sc->sc_nqueues; i++) {
   3163 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3164 		mutex_enter(rxq->rxq_lock);
   3165 		wm_rxdrain(rxq);
   3166 		mutex_exit(rxq->rxq_lock);
   3167 	}
   3168 	/* Must unlock here */
   3169 
   3170 	/* Disestablish the interrupt handler */
   3171 	for (i = 0; i < sc->sc_nintrs; i++) {
   3172 		if (sc->sc_ihs[i] != NULL) {
   3173 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3174 			sc->sc_ihs[i] = NULL;
   3175 		}
   3176 	}
   3177 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3178 
   3179 	/* wm_stop() ensures the workqueue is stopped. */
   3180 	workqueue_destroy(sc->sc_queue_wq);
   3181 
   3182 	for (i = 0; i < sc->sc_nqueues; i++)
   3183 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3184 
   3185 	wm_free_txrx_queues(sc);
   3186 
   3187 	/* Unmap the registers */
   3188 	if (sc->sc_ss) {
   3189 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3190 		sc->sc_ss = 0;
   3191 	}
   3192 	if (sc->sc_ios) {
   3193 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3194 		sc->sc_ios = 0;
   3195 	}
   3196 	if (sc->sc_flashs) {
   3197 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3198 		sc->sc_flashs = 0;
   3199 	}
   3200 
   3201 	if (sc->sc_core_lock)
   3202 		mutex_obj_free(sc->sc_core_lock);
   3203 	if (sc->sc_ich_phymtx)
   3204 		mutex_obj_free(sc->sc_ich_phymtx);
   3205 	if (sc->sc_ich_nvmmtx)
   3206 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3207 
   3208 	return 0;
   3209 }
   3210 
   3211 static bool
   3212 wm_suspend(device_t self, const pmf_qual_t *qual)
   3213 {
   3214 	struct wm_softc *sc = device_private(self);
   3215 
   3216 	wm_release_manageability(sc);
   3217 	wm_release_hw_control(sc);
   3218 	wm_enable_wakeup(sc);
   3219 
   3220 	return true;
   3221 }
   3222 
   3223 static bool
   3224 wm_resume(device_t self, const pmf_qual_t *qual)
   3225 {
   3226 	struct wm_softc *sc = device_private(self);
   3227 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3228 	pcireg_t reg;
   3229 	char buf[256];
   3230 
   3231 	reg = CSR_READ(sc, WMREG_WUS);
   3232 	if (reg != 0) {
   3233 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3234 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3235 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3236 	}
   3237 
   3238 	if (sc->sc_type >= WM_T_PCH2)
   3239 		wm_resume_workarounds_pchlan(sc);
   3240 	if ((ifp->if_flags & IFF_UP) == 0) {
   3241 		wm_reset(sc);
   3242 		/* Non-AMT based hardware can now take control from firmware */
   3243 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3244 			wm_get_hw_control(sc);
   3245 		wm_init_manageability(sc);
   3246 	} else {
   3247 		/*
   3248 		 * We called pmf_class_network_register(), so if_init() is
   3249 		 * automatically called when IFF_UP. wm_reset(),
   3250 		 * wm_get_hw_control() and wm_init_manageability() are called
   3251 		 * via wm_init().
   3252 		 */
   3253 	}
   3254 
   3255 	return true;
   3256 }
   3257 
   3258 /*
   3259  * wm_watchdog:		[ifnet interface function]
   3260  *
   3261  *	Watchdog timer handler.
   3262  */
   3263 static void
   3264 wm_watchdog(struct ifnet *ifp)
   3265 {
   3266 	int qid;
   3267 	struct wm_softc *sc = ifp->if_softc;
   3268 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3269 
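        	/*
        	 * hang_queue is a bitmask of hung Tx queues:
        	 * wm_watchdog_txq() sets the bit of each queue it finds
        	 * stuck (see wm_watchdog_txq_locked()).
        	 */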
   3270 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3271 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3272 
   3273 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3274 	}
   3275 
   3276 	/* If any of the queues hung up, reset the interface. */
   3277 	if (hang_queue != 0) {
   3278 		(void)wm_init(ifp);
   3279 
   3280 		/*
   3281 		 * There is still some upper layer processing which calls
   3282 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3283 		 */
   3284 		/* Try to get more packets going. */
   3285 		ifp->if_start(ifp);
   3286 	}
   3287 }
   3288 
   3289 
   3290 static void
   3291 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3292 {
   3293 
   3294 	mutex_enter(txq->txq_lock);
   3295 	if (txq->txq_sending &&
   3296 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3297 		wm_watchdog_txq_locked(ifp, txq, hang);
   3298 
   3299 	mutex_exit(txq->txq_lock);
   3300 }
   3301 
   3302 static void
   3303 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3304     uint16_t *hang)
   3305 {
   3306 	struct wm_softc *sc = ifp->if_softc;
   3307 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3308 
   3309 	KASSERT(mutex_owned(txq->txq_lock));
   3310 
   3311 	/*
   3312 	 * Since we're using delayed interrupts, sweep up
   3313 	 * before we report an error.
   3314 	 */
   3315 	wm_txeof(txq, UINT_MAX);
   3316 
   3317 	if (txq->txq_sending)
   3318 		*hang |= __BIT(wmq->wmq_id);
   3319 
   3320 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3321 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3322 		    device_xname(sc->sc_dev));
   3323 	} else {
   3324 #ifdef WM_DEBUG
   3325 		int i, j;
   3326 		struct wm_txsoft *txs;
   3327 #endif
   3328 		log(LOG_ERR,
   3329 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3330 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3331 		    txq->txq_next);
   3332 		if_statinc(ifp, if_oerrors);
   3333 #ifdef WM_DEBUG
   3334 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3335 		    i = WM_NEXTTXS(txq, i)) {
   3336 			txs = &txq->txq_soft[i];
   3337 			printf("txs %d tx %d -> %d\n",
   3338 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3339 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3340 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3341 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3342 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3343 					printf("\t %#08x%08x\n",
   3344 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3345 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3346 				} else {
   3347 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3348 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3349 					    txq->txq_descs[j].wtx_addr.wa_low);
   3350 					printf("\t %#04x%02x%02x%08x\n",
   3351 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3352 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3353 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3354 					    txq->txq_descs[j].wtx_cmdlen);
   3355 				}
   3356 				if (j == txs->txs_lastdesc)
   3357 					break;
   3358 			}
   3359 		}
   3360 #endif
   3361 	}
   3362 }
   3363 
   3364 /*
   3365  * wm_tick:
   3366  *
   3367  *	One second timer, used to check link status, sweep up
   3368  *	completed transmit jobs, etc.
   3369  */
   3370 static void
   3371 wm_tick(void *arg)
   3372 {
   3373 	struct wm_softc *sc = arg;
   3374 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3375 #ifndef WM_MPSAFE
   3376 	int s = splnet();
   3377 #endif
   3378 
   3379 	WM_CORE_LOCK(sc);
   3380 
   3381 	if (sc->sc_core_stopping) {
   3382 		WM_CORE_UNLOCK(sc);
   3383 #ifndef WM_MPSAFE
   3384 		splx(s);
   3385 #endif
   3386 		return;
   3387 	}
   3388 
   3389 	if (sc->sc_type >= WM_T_82542_2_1) {
   3390 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3391 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3392 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3393 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3394 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3395 	}
   3396 
   3397 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3398 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3399 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3400 	    + CSR_READ(sc, WMREG_CRCERRS)
   3401 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3402 	    + CSR_READ(sc, WMREG_SYMERRC)
   3403 	    + CSR_READ(sc, WMREG_RXERRC)
   3404 	    + CSR_READ(sc, WMREG_SEC)
   3405 	    + CSR_READ(sc, WMREG_CEXTERR)
   3406 	    + CSR_READ(sc, WMREG_RLEC));
   3407 	/*
   3408 	 * WMREG_RNBC is incremented when there are no available buffers
   3409 	 * in host memory. It does not count dropped packets, because the
   3410 	 * ethernet controller can still receive packets in such a case
   3411 	 * if there is space in the PHY's FIFO.
   3412 	 *
   3413 	 * If you want to count WMREG_RNBC, you should use a dedicated
   3414 	 * EVCNT instead of if_iqdrops.
   3415 	 */
   3416 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3417 	IF_STAT_PUTREF(ifp);
   3418 
   3419 	if (sc->sc_flags & WM_F_HAS_MII)
   3420 		mii_tick(&sc->sc_mii);
   3421 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3422 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3423 		wm_serdes_tick(sc);
   3424 	else
   3425 		wm_tbi_tick(sc);
   3426 
   3427 	WM_CORE_UNLOCK(sc);
   3428 
   3429 	wm_watchdog(ifp);
   3430 
   3431 	callout_schedule(&sc->sc_tick_ch, hz);
   3432 }
   3433 
   3434 static int
   3435 wm_ifflags_cb(struct ethercom *ec)
   3436 {
   3437 	struct ifnet *ifp = &ec->ec_if;
   3438 	struct wm_softc *sc = ifp->if_softc;
   3439 	u_short iffchange;
   3440 	int ecchange;
   3441 	bool needreset = false;
   3442 	int rc = 0;
   3443 
   3444 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3445 		device_xname(sc->sc_dev), __func__));
   3446 
   3447 	WM_CORE_LOCK(sc);
   3448 
   3449 	/*
   3450 	 * Check for if_flags.
   3451 	 * Main usage is to prevent linkdown when opening bpf.
   3452 	 */
   3453 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3454 	sc->sc_if_flags = ifp->if_flags;
   3455 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3456 		needreset = true;
   3457 		goto ec;
   3458 	}
   3459 
   3460 	/* iff related updates */
   3461 	if ((iffchange & IFF_PROMISC) != 0)
   3462 		wm_set_filter(sc);
   3463 
   3464 	wm_set_vlan(sc);
   3465 
   3466 ec:
   3467 	/* Check for ec_capenable. */
   3468 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3469 	sc->sc_ec_capenable = ec->ec_capenable;
   3470 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3471 		needreset = true;
   3472 		goto out;
   3473 	}
   3474 
   3475 	/* ec related updates */
   3476 	wm_set_eee(sc);
   3477 
   3478 out:
   3479 	if (needreset)
   3480 		rc = ENETRESET;
   3481 	WM_CORE_UNLOCK(sc);
   3482 
   3483 	return rc;
   3484 }
   3485 
   3486 /*
   3487  * wm_ioctl:		[ifnet interface function]
   3488  *
   3489  *	Handle control requests from the operator.
   3490  */
   3491 static int
   3492 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3493 {
   3494 	struct wm_softc *sc = ifp->if_softc;
   3495 	struct ifreq *ifr = (struct ifreq *)data;
   3496 	struct ifaddr *ifa = (struct ifaddr *)data;
   3497 	struct sockaddr_dl *sdl;
   3498 	int s, error;
   3499 
   3500 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3501 		device_xname(sc->sc_dev), __func__));
   3502 
   3503 #ifndef WM_MPSAFE
   3504 	s = splnet();
   3505 #endif
   3506 	switch (cmd) {
   3507 	case SIOCSIFMEDIA:
   3508 		WM_CORE_LOCK(sc);
   3509 		/* Flow control requires full-duplex mode. */
   3510 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3511 		    (ifr->ifr_media & IFM_FDX) == 0)
   3512 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3513 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3514 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3515 				/* We can do both TXPAUSE and RXPAUSE. */
   3516 				ifr->ifr_media |=
   3517 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3518 			}
   3519 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3520 		}
   3521 		WM_CORE_UNLOCK(sc);
   3522 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3523 		break;
   3524 	case SIOCINITIFADDR:
   3525 		WM_CORE_LOCK(sc);
   3526 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3527 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3528 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3529 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3530 			/* Unicast address is the first multicast entry */
   3531 			wm_set_filter(sc);
   3532 			error = 0;
   3533 			WM_CORE_UNLOCK(sc);
   3534 			break;
   3535 		}
   3536 		WM_CORE_UNLOCK(sc);
   3537 		/*FALLTHROUGH*/
   3538 	default:
   3539 #ifdef WM_MPSAFE
   3540 		s = splnet();
   3541 #endif
   3542 		/* It may call wm_start, so unlock here */
   3543 		error = ether_ioctl(ifp, cmd, data);
   3544 #ifdef WM_MPSAFE
   3545 		splx(s);
   3546 #endif
   3547 		if (error != ENETRESET)
   3548 			break;
   3549 
   3550 		error = 0;
   3551 
   3552 		if (cmd == SIOCSIFCAP)
   3553 			error = (*ifp->if_init)(ifp);
   3554 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3555 			;
   3556 		else if (ifp->if_flags & IFF_RUNNING) {
   3557 			/*
   3558 			 * Multicast list has changed; set the hardware filter
   3559 			 * accordingly.
   3560 			 */
   3561 			WM_CORE_LOCK(sc);
   3562 			wm_set_filter(sc);
   3563 			WM_CORE_UNLOCK(sc);
   3564 		}
   3565 		break;
   3566 	}
   3567 
   3568 #ifndef WM_MPSAFE
   3569 	splx(s);
   3570 #endif
   3571 	return error;
   3572 }
   3573 
   3574 /* MAC address related */
   3575 
   3576 /*
   3577  * Get the offset of the MAC address and return it.
   3578  * If an error occurred, use offset 0.
   3579  */
   3580 static uint16_t
   3581 wm_check_alt_mac_addr(struct wm_softc *sc)
   3582 {
   3583 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3584 	uint16_t offset = NVM_OFF_MACADDR;
   3585 
   3586 	/* Try to read alternative MAC address pointer */
   3587 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3588 		return 0;
   3589 
   3590 	/* Check whether the pointer is valid or not. */
   3591 	if ((offset == 0x0000) || (offset == 0xffff))
   3592 		return 0;
   3593 
   3594 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3595 	/*
   3596 	 * Check whether the alternative MAC address is valid. Some
   3597 	 * cards have a non-0xffff pointer but don't actually use an
   3598 	 * alternative MAC address.
   3599 	 *
   3600 	 * The multicast bit (LSB of the first octet) must be clear.
   3601 	 */
   3602 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3603 		if ((myea[0] & 0x01) == 0)
   3604 			return offset; /* Found */
   3605 
   3606 	/* Not found */
   3607 	return 0;
   3608 }
   3609 
   3610 static int
   3611 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3612 {
   3613 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3614 	uint16_t offset = NVM_OFF_MACADDR;
   3615 	int do_invert = 0;
   3616 
   3617 	switch (sc->sc_type) {
   3618 	case WM_T_82580:
   3619 	case WM_T_I350:
   3620 	case WM_T_I354:
   3621 		/* EEPROM Top Level Partitioning */
   3622 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3623 		break;
   3624 	case WM_T_82571:
   3625 	case WM_T_82575:
   3626 	case WM_T_82576:
   3627 	case WM_T_80003:
   3628 	case WM_T_I210:
   3629 	case WM_T_I211:
   3630 		offset = wm_check_alt_mac_addr(sc);
   3631 		if (offset == 0)
   3632 			if ((sc->sc_funcid & 0x01) == 1)
   3633 				do_invert = 1;
   3634 		break;
   3635 	default:
   3636 		if ((sc->sc_funcid & 0x01) == 1)
   3637 			do_invert = 1;
   3638 		break;
   3639 	}
   3640 
   3641 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3642 		goto bad;
   3643 
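        	/* Each 16-bit NVM word holds two MAC address bytes, low byte first. */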
   3644 	enaddr[0] = myea[0] & 0xff;
   3645 	enaddr[1] = myea[0] >> 8;
   3646 	enaddr[2] = myea[1] & 0xff;
   3647 	enaddr[3] = myea[1] >> 8;
   3648 	enaddr[4] = myea[2] & 0xff;
   3649 	enaddr[5] = myea[2] >> 8;
   3650 
   3651 	/*
   3652 	 * Toggle the LSB of the MAC address on the second port
   3653 	 * of some dual port cards.
   3654 	 */
   3655 	if (do_invert != 0)
   3656 		enaddr[5] ^= 1;
   3657 
   3658 	return 0;
   3659 
   3660  bad:
   3661 	return -1;
   3662 }
   3663 
   3664 /*
   3665  * wm_set_ral:
   3666  *
   3667  *	Set an entry in the receive address list.
   3668  */
   3669 static void
   3670 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3671 {
   3672 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3673 	uint32_t wlock_mac;
   3674 	int rv;
   3675 
   3676 	if (enaddr != NULL) {
   3677 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3678 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3679 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3680 		ral_hi |= RAL_AV;
   3681 	} else {
   3682 		ral_lo = 0;
   3683 		ral_hi = 0;
   3684 	}
   3685 
   3686 	switch (sc->sc_type) {
   3687 	case WM_T_82542_2_0:
   3688 	case WM_T_82542_2_1:
   3689 	case WM_T_82543:
   3690 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3691 		CSR_WRITE_FLUSH(sc);
   3692 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3693 		CSR_WRITE_FLUSH(sc);
   3694 		break;
   3695 	case WM_T_PCH2:
   3696 	case WM_T_PCH_LPT:
   3697 	case WM_T_PCH_SPT:
   3698 	case WM_T_PCH_CNP:
   3699 		if (idx == 0) {
   3700 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3701 			CSR_WRITE_FLUSH(sc);
   3702 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3703 			CSR_WRITE_FLUSH(sc);
   3704 			return;
   3705 		}
   3706 		if (sc->sc_type != WM_T_PCH2) {
   3707 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3708 			    FWSM_WLOCK_MAC);
   3709 			addrl = WMREG_SHRAL(idx - 1);
   3710 			addrh = WMREG_SHRAH(idx - 1);
   3711 		} else {
   3712 			wlock_mac = 0;
   3713 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3714 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3715 		}
   3716 
   3717 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3718 			rv = wm_get_swflag_ich8lan(sc);
   3719 			if (rv != 0)
   3720 				return;
   3721 			CSR_WRITE(sc, addrl, ral_lo);
   3722 			CSR_WRITE_FLUSH(sc);
   3723 			CSR_WRITE(sc, addrh, ral_hi);
   3724 			CSR_WRITE_FLUSH(sc);
   3725 			wm_put_swflag_ich8lan(sc);
   3726 		}
   3727 
   3728 		break;
   3729 	default:
   3730 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3731 		CSR_WRITE_FLUSH(sc);
   3732 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3733 		CSR_WRITE_FLUSH(sc);
   3734 		break;
   3735 	}
   3736 }
   3737 
   3738 /*
   3739  * wm_mchash:
   3740  *
   3741  *	Compute the hash of the multicast address for the 4096-bit
   3742  *	multicast filter (1024-bit on ICH/PCH variants).
   3743  */
   3744 static uint32_t
   3745 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3746 {
   3747 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3748 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3749 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3750 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3751 	uint32_t hash;
   3752 
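        	/*
        	 * ICH/PCH variants have a 1024-bit (32-register) MTA, so the
        	 * hash is 10 bits wide; other chips have a 4096-bit MTA and a
        	 * 12-bit hash. mchash_type selects which bits of the last two
        	 * address octets are combined. For example, with mchash_type 0
        	 * on non-ICH chips, 01:00:5e:00:00:01 hashes to
        	 * (0x00 >> 4) | (0x01 << 4) = 0x010.
        	 */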
   3753 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3754 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3755 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3756 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3757 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3758 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3759 		return (hash & 0x3ff);
   3760 	}
   3761 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3762 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3763 
   3764 	return (hash & 0xfff);
   3765 }
   3766 
   3767 /*
   3768  * wm_rar_count:
   3769  *	Return the number of receive address list (RAL/RAH) entries.
   3770  */
   3771 static int
   3772 wm_rar_count(struct wm_softc *sc)
   3773 {
   3774 	int size;
   3775 
   3776 	switch (sc->sc_type) {
   3777 	case WM_T_ICH8:
   3778 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3779 		break;
   3780 	case WM_T_ICH9:
   3781 	case WM_T_ICH10:
   3782 	case WM_T_PCH:
   3783 		size = WM_RAL_TABSIZE_ICH8;
   3784 		break;
   3785 	case WM_T_PCH2:
   3786 		size = WM_RAL_TABSIZE_PCH2;
   3787 		break;
   3788 	case WM_T_PCH_LPT:
   3789 	case WM_T_PCH_SPT:
   3790 	case WM_T_PCH_CNP:
   3791 		size = WM_RAL_TABSIZE_PCH_LPT;
   3792 		break;
   3793 	case WM_T_82575:
   3794 	case WM_T_I210:
   3795 	case WM_T_I211:
   3796 		size = WM_RAL_TABSIZE_82575;
   3797 		break;
   3798 	case WM_T_82576:
   3799 	case WM_T_82580:
   3800 		size = WM_RAL_TABSIZE_82576;
   3801 		break;
   3802 	case WM_T_I350:
   3803 	case WM_T_I354:
   3804 		size = WM_RAL_TABSIZE_I350;
   3805 		break;
   3806 	default:
   3807 		size = WM_RAL_TABSIZE;
   3808 	}
   3809 
   3810 	return size;
   3811 }
   3812 
   3813 /*
   3814  * wm_set_filter:
   3815  *
   3816  *	Set up the receive filter.
   3817  */
   3818 static void
   3819 wm_set_filter(struct wm_softc *sc)
   3820 {
   3821 	struct ethercom *ec = &sc->sc_ethercom;
   3822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3823 	struct ether_multi *enm;
   3824 	struct ether_multistep step;
   3825 	bus_addr_t mta_reg;
   3826 	uint32_t hash, reg, bit;
   3827 	int i, size, ralmax, rv;
   3828 
   3829 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3830 		device_xname(sc->sc_dev), __func__));
   3831 
   3832 	if (sc->sc_type >= WM_T_82544)
   3833 		mta_reg = WMREG_CORDOVA_MTA;
   3834 	else
   3835 		mta_reg = WMREG_MTA;
   3836 
   3837 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3838 
   3839 	if (ifp->if_flags & IFF_BROADCAST)
   3840 		sc->sc_rctl |= RCTL_BAM;
   3841 	if (ifp->if_flags & IFF_PROMISC) {
   3842 		sc->sc_rctl |= RCTL_UPE;
   3843 		ETHER_LOCK(ec);
   3844 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3845 		ETHER_UNLOCK(ec);
   3846 		goto allmulti;
   3847 	}
   3848 
   3849 	/*
   3850 	 * Set the station address in the first RAL slot, and
   3851 	 * clear the remaining slots.
   3852 	 */
   3853 	size = wm_rar_count(sc);
   3854 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3855 
   3856 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3857 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3858 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3859 		switch (i) {
   3860 		case 0:
   3861 			/* We can use all entries */
   3862 			ralmax = size;
   3863 			break;
   3864 		case 1:
   3865 			/* Only RAR[0] */
   3866 			ralmax = 1;
   3867 			break;
   3868 		default:
   3869 			/* Available SHRA + RAR[0] */
   3870 			ralmax = i + 1;
   3871 		}
   3872 	} else
   3873 		ralmax = size;
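        	/* Slots from ralmax up (if any) are locked by firmware (WLOCK_MAC). */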
   3874 	for (i = 1; i < size; i++) {
   3875 		if (i < ralmax)
   3876 			wm_set_ral(sc, NULL, i);
   3877 	}
   3878 
   3879 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3880 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3881 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3882 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3883 		size = WM_ICH8_MC_TABSIZE;
   3884 	else
   3885 		size = WM_MC_TABSIZE;
   3886 	/* Clear out the multicast table. */
   3887 	for (i = 0; i < size; i++) {
   3888 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3889 		CSR_WRITE_FLUSH(sc);
   3890 	}
   3891 
   3892 	ETHER_LOCK(ec);
   3893 	ETHER_FIRST_MULTI(step, ec, enm);
   3894 	while (enm != NULL) {
   3895 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3896 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3897 			ETHER_UNLOCK(ec);
   3898 			/*
   3899 			 * We must listen to a range of multicast addresses.
   3900 			 * For now, just accept all multicasts, rather than
   3901 			 * trying to set only those filter bits needed to match
   3902 			 * the range.  (At this time, the only use of address
   3903 			 * ranges is for IP multicast routing, for which the
   3904 			 * range is big enough to require all bits set.)
   3905 			 */
   3906 			goto allmulti;
   3907 		}
   3908 
   3909 		hash = wm_mchash(sc, enm->enm_addrlo);
   3910 
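        		/*
        		 * The hash selects one bit in the MTA: the upper bits
        		 * index a 32-bit MTA register and the low 5 bits pick
        		 * the bit within it (e.g. hash 0x4a -> bit 10 of MTA[2]).
        		 */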
   3911 		reg = (hash >> 5);
   3912 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3913 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3914 		    || (sc->sc_type == WM_T_PCH2)
   3915 		    || (sc->sc_type == WM_T_PCH_LPT)
   3916 		    || (sc->sc_type == WM_T_PCH_SPT)
   3917 		    || (sc->sc_type == WM_T_PCH_CNP))
   3918 			reg &= 0x1f;
   3919 		else
   3920 			reg &= 0x7f;
   3921 		bit = hash & 0x1f;
   3922 
   3923 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3924 		hash |= 1U << bit;
   3925 
   3926 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3927 			/*
   3928 			 * 82544 Errata 9: Certain register cannot be written
   3929 			 * with particular alignments in PCI-X bus operation
   3930 			 * (FCAH, MTA and VFTA).
   3931 			 */
   3932 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3933 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3934 			CSR_WRITE_FLUSH(sc);
   3935 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3936 			CSR_WRITE_FLUSH(sc);
   3937 		} else {
   3938 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3939 			CSR_WRITE_FLUSH(sc);
   3940 		}
   3941 
   3942 		ETHER_NEXT_MULTI(step, enm);
   3943 	}
   3944 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3945 	ETHER_UNLOCK(ec);
   3946 
   3947 	goto setit;
   3948 
   3949  allmulti:
   3950 	sc->sc_rctl |= RCTL_MPE;
   3951 
   3952  setit:
   3953 	if (sc->sc_type >= WM_T_PCH2) {
   3954 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   3955 		    && (ifp->if_mtu > ETHERMTU))
   3956 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   3957 		else
   3958 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   3959 		if (rv != 0)
   3960 			device_printf(sc->sc_dev,
   3961 			    "Failed to do workaround for jumbo frame.\n");
   3962 	}
   3963 
   3964 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3965 }
   3966 
   3967 /* Reset and init related */
   3968 
   3969 static void
   3970 wm_set_vlan(struct wm_softc *sc)
   3971 {
   3972 
   3973 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3974 		device_xname(sc->sc_dev), __func__));
   3975 
   3976 	/* Deal with VLAN enables. */
   3977 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3978 		sc->sc_ctrl |= CTRL_VME;
   3979 	else
   3980 		sc->sc_ctrl &= ~CTRL_VME;
   3981 
   3982 	/* Write the control registers. */
   3983 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3984 }
   3985 
   3986 static void
   3987 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3988 {
   3989 	uint32_t gcr;
   3990 	pcireg_t ctrl2;
   3991 
   3992 	gcr = CSR_READ(sc, WMREG_GCR);
   3993 
   3994 	/* Only take action if timeout value is defaulted to 0 */
   3995 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3996 		goto out;
   3997 
   3998 	if ((gcr & GCR_CAP_VER2) == 0) {
   3999 		gcr |= GCR_CMPL_TMOUT_10MS;
   4000 		goto out;
   4001 	}
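        	/*
        	 * PCIe capability version 2 devices set the completion
        	 * timeout through the Device Control 2 register instead.
        	 */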
   4002 
   4003 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4004 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4005 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4006 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4007 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4008 
   4009 out:
   4010 	/* Disable completion timeout resend */
   4011 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4012 
   4013 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4014 }
   4015 
   4016 void
   4017 wm_get_auto_rd_done(struct wm_softc *sc)
   4018 {
   4019 	int i;
   4020 
   4021 	/* Wait for eeprom to reload */
   4022 	switch (sc->sc_type) {
   4023 	case WM_T_82571:
   4024 	case WM_T_82572:
   4025 	case WM_T_82573:
   4026 	case WM_T_82574:
   4027 	case WM_T_82583:
   4028 	case WM_T_82575:
   4029 	case WM_T_82576:
   4030 	case WM_T_82580:
   4031 	case WM_T_I350:
   4032 	case WM_T_I354:
   4033 	case WM_T_I210:
   4034 	case WM_T_I211:
   4035 	case WM_T_80003:
   4036 	case WM_T_ICH8:
   4037 	case WM_T_ICH9:
   4038 		for (i = 0; i < 10; i++) {
   4039 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4040 				break;
   4041 			delay(1000);
   4042 		}
   4043 		if (i == 10) {
   4044 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4045 			    "complete\n", device_xname(sc->sc_dev));
   4046 		}
   4047 		break;
   4048 	default:
   4049 		break;
   4050 	}
   4051 }
   4052 
   4053 void
   4054 wm_lan_init_done(struct wm_softc *sc)
   4055 {
   4056 	uint32_t reg = 0;
   4057 	int i;
   4058 
   4059 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4060 		device_xname(sc->sc_dev), __func__));
   4061 
   4062 	/* Wait for eeprom to reload */
   4063 	switch (sc->sc_type) {
   4064 	case WM_T_ICH10:
   4065 	case WM_T_PCH:
   4066 	case WM_T_PCH2:
   4067 	case WM_T_PCH_LPT:
   4068 	case WM_T_PCH_SPT:
   4069 	case WM_T_PCH_CNP:
   4070 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4071 			reg = CSR_READ(sc, WMREG_STATUS);
   4072 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4073 				break;
   4074 			delay(100);
   4075 		}
   4076 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4077 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4078 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4079 		}
   4080 		break;
   4081 	default:
   4082 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4083 		    __func__);
   4084 		break;
   4085 	}
   4086 
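        	/* Acknowledge completion by clearing LAN_INIT_DONE in STATUS. */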
   4087 	reg &= ~STATUS_LAN_INIT_DONE;
   4088 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4089 }
   4090 
   4091 void
   4092 wm_get_cfg_done(struct wm_softc *sc)
   4093 {
   4094 	int mask;
   4095 	uint32_t reg;
   4096 	int i;
   4097 
   4098 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4099 		device_xname(sc->sc_dev), __func__));
   4100 
   4101 	/* Wait for eeprom to reload */
   4102 	switch (sc->sc_type) {
   4103 	case WM_T_82542_2_0:
   4104 	case WM_T_82542_2_1:
   4105 		/* null */
   4106 		break;
   4107 	case WM_T_82543:
   4108 	case WM_T_82544:
   4109 	case WM_T_82540:
   4110 	case WM_T_82545:
   4111 	case WM_T_82545_3:
   4112 	case WM_T_82546:
   4113 	case WM_T_82546_3:
   4114 	case WM_T_82541:
   4115 	case WM_T_82541_2:
   4116 	case WM_T_82547:
   4117 	case WM_T_82547_2:
   4118 	case WM_T_82573:
   4119 	case WM_T_82574:
   4120 	case WM_T_82583:
   4121 		/* generic */
   4122 		delay(10*1000);
   4123 		break;
   4124 	case WM_T_80003:
   4125 	case WM_T_82571:
   4126 	case WM_T_82572:
   4127 	case WM_T_82575:
   4128 	case WM_T_82576:
   4129 	case WM_T_82580:
   4130 	case WM_T_I350:
   4131 	case WM_T_I354:
   4132 	case WM_T_I210:
   4133 	case WM_T_I211:
   4134 		if (sc->sc_type == WM_T_82571) {
   4135 			/* On the 82571, all functions share the port 0 bit */
   4136 			mask = EEMNGCTL_CFGDONE_0;
   4137 		} else
   4138 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4139 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4140 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4141 				break;
   4142 			delay(1000);
   4143 		}
   4144 		if (i >= WM_PHY_CFG_TIMEOUT)
   4145 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4146 				device_xname(sc->sc_dev), __func__));
   4147 		break;
   4148 	case WM_T_ICH8:
   4149 	case WM_T_ICH9:
   4150 	case WM_T_ICH10:
   4151 	case WM_T_PCH:
   4152 	case WM_T_PCH2:
   4153 	case WM_T_PCH_LPT:
   4154 	case WM_T_PCH_SPT:
   4155 	case WM_T_PCH_CNP:
   4156 		delay(10*1000);
   4157 		if (sc->sc_type >= WM_T_ICH10)
   4158 			wm_lan_init_done(sc);
   4159 		else
   4160 			wm_get_auto_rd_done(sc);
   4161 
   4162 		/* Clear PHY Reset Asserted bit */
   4163 		reg = CSR_READ(sc, WMREG_STATUS);
   4164 		if ((reg & STATUS_PHYRA) != 0)
   4165 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4166 		break;
   4167 	default:
   4168 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4169 		    __func__);
   4170 		break;
   4171 	}
   4172 }
   4173 
   4174 int
   4175 wm_phy_post_reset(struct wm_softc *sc)
   4176 {
   4177 	device_t dev = sc->sc_dev;
   4178 	uint16_t reg;
   4179 	int rv = 0;
   4180 
   4181 	/* This function is only for ICH8 and newer. */
   4182 	if (sc->sc_type < WM_T_ICH8)
   4183 		return 0;
   4184 
   4185 	if (wm_phy_resetisblocked(sc)) {
   4186 		/* XXX */
   4187 		device_printf(dev, "PHY is blocked\n");
   4188 		return -1;
   4189 	}
   4190 
   4191 	/* Allow time for h/w to get to quiescent state after reset */
   4192 	delay(10*1000);
   4193 
   4194 	/* Perform any necessary post-reset workarounds */
   4195 	if (sc->sc_type == WM_T_PCH)
   4196 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4197 	else if (sc->sc_type == WM_T_PCH2)
   4198 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4199 	if (rv != 0)
   4200 		return rv;
   4201 
   4202 	/* Clear the host wakeup bit after lcd reset */
   4203 	if (sc->sc_type >= WM_T_PCH) {
   4204 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4205 		reg &= ~BM_WUC_HOST_WU_BIT;
   4206 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4207 	}
   4208 
   4209 	/* Configure the LCD with the extended configuration region in NVM */
   4210 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4211 		return rv;
   4212 
   4213 	/* Configure the LCD with the OEM bits in NVM */
   4214 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4215 
   4216 	if (sc->sc_type == WM_T_PCH2) {
   4217 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4218 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4219 			delay(10 * 1000);
   4220 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4221 		}
   4222 		/* Set EEE LPI Update Timer to 200usec */
   4223 		rv = sc->phy.acquire(sc);
   4224 		if (rv)
   4225 			return rv;
   4226 		rv = wm_write_emi_reg_locked(dev,
   4227 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4228 		sc->phy.release(sc);
   4229 	}
   4230 
   4231 	return rv;
   4232 }
   4233 
   4234 /* Only for PCH and newer */
   4235 static int
   4236 wm_write_smbus_addr(struct wm_softc *sc)
   4237 {
   4238 	uint32_t strap, freq;
   4239 	uint16_t phy_data;
   4240 	int rv;
   4241 
   4242 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4243 		device_xname(sc->sc_dev), __func__));
   4244 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4245 
   4246 	strap = CSR_READ(sc, WMREG_STRAP);
   4247 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4248 
   4249 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4250 	if (rv != 0)
   4251 		return -1;
   4252 
   4253 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4254 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4255 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4256 
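        	/*
        	 * For the I217, the SMBus frequency strap is restored below:
        	 * freq == 0 means the frequency is unknown, so it is
        	 * decremented first and the remaining low two bits then map
        	 * onto the PHY's FREQ_LOW/FREQ_HIGH bits.
        	 */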
   4257 	if (sc->sc_phytype == WMPHY_I217) {
   4258 		/* Restore SMBus frequency */
   4259 		if (freq--) {
   4260 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4261 			    | HV_SMB_ADDR_FREQ_HIGH);
   4262 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4263 			    HV_SMB_ADDR_FREQ_LOW);
   4264 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4265 			    HV_SMB_ADDR_FREQ_HIGH);
   4266 		} else
   4267 			DPRINTF(sc, WM_DEBUG_INIT,
   4268 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4269 				device_xname(sc->sc_dev), __func__));
   4270 	}
   4271 
   4272 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4273 	    phy_data);
   4274 }
   4275 
   4276 static int
   4277 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4278 {
   4279 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4280 	uint16_t phy_page = 0;
   4281 	int rv = 0;
   4282 
   4283 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4284 		device_xname(sc->sc_dev), __func__));
   4285 
   4286 	switch (sc->sc_type) {
   4287 	case WM_T_ICH8:
   4288 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4289 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4290 			return 0;
   4291 
   4292 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4293 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4294 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4295 			break;
   4296 		}
   4297 		/* FALLTHROUGH */
   4298 	case WM_T_PCH:
   4299 	case WM_T_PCH2:
   4300 	case WM_T_PCH_LPT:
   4301 	case WM_T_PCH_SPT:
   4302 	case WM_T_PCH_CNP:
   4303 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4304 		break;
   4305 	default:
   4306 		return 0;
   4307 	}
   4308 
   4309 	if ((rv = sc->phy.acquire(sc)) != 0)
   4310 		return rv;
   4311 
   4312 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4313 	if ((reg & sw_cfg_mask) == 0)
   4314 		goto release;
   4315 
   4316 	/*
   4317 	 * Make sure HW does not configure LCD from PHY extended configuration
   4318 	 * before SW configuration
   4319 	 */
   4320 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4321 	if ((sc->sc_type < WM_T_PCH2)
   4322 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4323 		goto release;
   4324 
   4325 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4326 		device_xname(sc->sc_dev), __func__));
   4327 	/* The extended config pointer is in DWORDs; convert to a word address. */
   4328 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4329 
   4330 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4331 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4332 	if (cnf_size == 0)
   4333 		goto release;
   4334 
   4335 	if (((sc->sc_type == WM_T_PCH)
   4336 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4337 	    || (sc->sc_type > WM_T_PCH)) {
   4338 		/*
   4339 		 * HW configures the SMBus address and LEDs when the OEM and
   4340 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4341 		 * are cleared, SW will configure them instead.
   4342 		 */
   4343 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4344 			device_xname(sc->sc_dev), __func__));
   4345 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4346 			goto release;
   4347 
   4348 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4349 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4350 		    (uint16_t)reg);
   4351 		if (rv != 0)
   4352 			goto release;
   4353 	}
   4354 
   4355 	/* Configure LCD from extended configuration region. */
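        	/*
        	 * Each entry is a pair of NVM words: the register data
        	 * first, then the PHY register address.
        	 */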
   4356 	for (i = 0; i < cnf_size; i++) {
   4357 		uint16_t reg_data, reg_addr;
   4358 
   4359 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4360 			goto release;
   4361 
   4362 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4363 			goto release;
   4364 
   4365 		if (reg_addr == IGPHY_PAGE_SELECT)
   4366 			phy_page = reg_data;
   4367 
   4368 		reg_addr &= IGPHY_MAXREGADDR;
   4369 		reg_addr |= phy_page;
   4370 
   4371 		KASSERT(sc->phy.writereg_locked != NULL);
   4372 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4373 		    reg_data);
   4374 	}
   4375 
   4376 release:
   4377 	sc->phy.release(sc);
   4378 	return rv;
   4379 }
   4380 
   4381 /*
   4382  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4383  *  @sc:       pointer to the HW structure
   4384  *  @d0_state: boolean if entering d0 or d3 device state
   4385  *
   4386  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4387  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4388  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4389  */
   4390 int
   4391 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4392 {
   4393 	uint32_t mac_reg;
   4394 	uint16_t oem_reg;
   4395 	int rv;
   4396 
   4397 	if (sc->sc_type < WM_T_PCH)
   4398 		return 0;
   4399 
   4400 	rv = sc->phy.acquire(sc);
   4401 	if (rv != 0)
   4402 		return rv;
   4403 
   4404 	if (sc->sc_type == WM_T_PCH) {
   4405 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4406 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4407 			goto release;
   4408 	}
   4409 
   4410 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4411 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4412 		goto release;
   4413 
   4414 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4415 
   4416 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4417 	if (rv != 0)
   4418 		goto release;
   4419 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4420 
   4421 	if (d0_state) {
   4422 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4423 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4424 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4425 			oem_reg |= HV_OEM_BITS_LPLU;
   4426 	} else {
   4427 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4428 		    != 0)
   4429 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4430 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4431 		    != 0)
   4432 			oem_reg |= HV_OEM_BITS_LPLU;
   4433 	}
   4434 
   4435 	/* Set Restart auto-neg to activate the bits */
   4436 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4437 	    && (wm_phy_resetisblocked(sc) == false))
   4438 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4439 
   4440 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4441 
   4442 release:
   4443 	sc->phy.release(sc);
   4444 
   4445 	return rv;
   4446 }
   4447 
   4448 /* Init hardware bits */
   4449 void
   4450 wm_initialize_hardware_bits(struct wm_softc *sc)
   4451 {
   4452 	uint32_t tarc0, tarc1, reg;
   4453 
   4454 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4455 		device_xname(sc->sc_dev), __func__));
   4456 
   4457 	/* For 82571 variant, 80003 and ICHs */
   4458 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4459 	    || (sc->sc_type >= WM_T_80003)) {
   4460 
   4461 		/* Transmit Descriptor Control 0 */
   4462 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4463 		reg |= TXDCTL_COUNT_DESC;
   4464 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4465 
   4466 		/* Transmit Descriptor Control 1 */
   4467 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4468 		reg |= TXDCTL_COUNT_DESC;
   4469 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4470 
   4471 		/* TARC0 */
   4472 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4473 		switch (sc->sc_type) {
   4474 		case WM_T_82571:
   4475 		case WM_T_82572:
   4476 		case WM_T_82573:
   4477 		case WM_T_82574:
   4478 		case WM_T_82583:
   4479 		case WM_T_80003:
   4480 			/* Clear bits 30..27 */
   4481 			tarc0 &= ~__BITS(30, 27);
   4482 			break;
   4483 		default:
   4484 			break;
   4485 		}
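
         		/*
         		 * For reference, the __BIT()/__BITS() macros used
         		 * throughout this function come from <sys/cdefs.h>:
         		 * __BIT(n) is a mask with only bit n set, and
         		 * __BITS(hi, lo) is a contiguous mask covering bits
         		 * hi..lo inclusive.  For example:
         		 *
         		 *	__BIT(28)      == 0x10000000
         		 *	__BITS(30, 27) == 0x78000000
         		 *
         		 * so "tarc0 &= ~__BITS(30, 27)" clears TARC0 bits
         		 * 30..27.
         		 */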
   4486 
   4487 		switch (sc->sc_type) {
   4488 		case WM_T_82571:
   4489 		case WM_T_82572:
   4490 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4491 
   4492 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4493 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4494 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4495 			/* 8257[12] Errata No.7 */
    4496 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4497 
   4498 			/* TARC1 bit 28 */
   4499 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4500 				tarc1 &= ~__BIT(28);
   4501 			else
   4502 				tarc1 |= __BIT(28);
   4503 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4504 
   4505 			/*
   4506 			 * 8257[12] Errata No.13
    4507 			 * Disable Dynamic Clock Gating.
   4508 			 */
   4509 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4510 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4511 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4512 			break;
   4513 		case WM_T_82573:
   4514 		case WM_T_82574:
   4515 		case WM_T_82583:
   4516 			if ((sc->sc_type == WM_T_82574)
   4517 			    || (sc->sc_type == WM_T_82583))
   4518 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4519 
   4520 			/* Extended Device Control */
   4521 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4522 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4523 			reg |= __BIT(22);	/* Set bit 22 */
   4524 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4525 
   4526 			/* Device Control */
   4527 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4528 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4529 
   4530 			/* PCIe Control Register */
   4531 			/*
   4532 			 * 82573 Errata (unknown).
   4533 			 *
   4534 			 * 82574 Errata 25 and 82583 Errata 12
   4535 			 * "Dropped Rx Packets":
    4536 			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   4537 			 */
   4538 			reg = CSR_READ(sc, WMREG_GCR);
   4539 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4540 			CSR_WRITE(sc, WMREG_GCR, reg);
   4541 
   4542 			if ((sc->sc_type == WM_T_82574)
   4543 			    || (sc->sc_type == WM_T_82583)) {
   4544 				/*
    4545 				 * The documentation says this bit must be set for
   4546 				 * proper operation.
   4547 				 */
   4548 				reg = CSR_READ(sc, WMREG_GCR);
   4549 				reg |= __BIT(22);
   4550 				CSR_WRITE(sc, WMREG_GCR, reg);
   4551 
   4552 				/*
    4553 				 * Apply a workaround for the hardware
    4554 				 * erratum documented in the errata sheets.
    4555 				 * It fixes an issue where some error-prone
    4556 				 * or unreliable PCIe completions occur,
    4557 				 * particularly with ASPM enabled. Without
    4558 				 * the fix, the issue can cause Tx timeouts.
   4559 				 */
   4560 				reg = CSR_READ(sc, WMREG_GCR2);
   4561 				reg |= __BIT(0);
   4562 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4563 			}
   4564 			break;
   4565 		case WM_T_80003:
   4566 			/* TARC0 */
   4567 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4568 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4569 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4570 
   4571 			/* TARC1 bit 28 */
   4572 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4573 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4574 				tarc1 &= ~__BIT(28);
   4575 			else
   4576 				tarc1 |= __BIT(28);
   4577 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4578 			break;
   4579 		case WM_T_ICH8:
   4580 		case WM_T_ICH9:
   4581 		case WM_T_ICH10:
   4582 		case WM_T_PCH:
   4583 		case WM_T_PCH2:
   4584 		case WM_T_PCH_LPT:
   4585 		case WM_T_PCH_SPT:
   4586 		case WM_T_PCH_CNP:
   4587 			/* TARC0 */
   4588 			if (sc->sc_type == WM_T_ICH8) {
   4589 				/* Set TARC0 bits 29 and 28 */
   4590 				tarc0 |= __BITS(29, 28);
   4591 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4592 				tarc0 |= __BIT(29);
   4593 				/*
    4594 				 * Drop bit 28, following Linux.
    4595 				 * See the I218/I219 spec update,
   4596 				 * "5. Buffer Overrun While the I219 is
   4597 				 * Processing DMA Transactions"
   4598 				 */
   4599 				tarc0 &= ~__BIT(28);
   4600 			}
   4601 			/* Set TARC0 bits 23,24,26,27 */
   4602 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4603 
   4604 			/* CTRL_EXT */
   4605 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4606 			reg |= __BIT(22);	/* Set bit 22 */
   4607 			/*
   4608 			 * Enable PHY low-power state when MAC is at D3
   4609 			 * w/o WoL
   4610 			 */
   4611 			if (sc->sc_type >= WM_T_PCH)
   4612 				reg |= CTRL_EXT_PHYPDEN;
   4613 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4614 
   4615 			/* TARC1 */
   4616 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4617 			/* bit 28 */
   4618 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4619 				tarc1 &= ~__BIT(28);
   4620 			else
   4621 				tarc1 |= __BIT(28);
   4622 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4623 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4624 
   4625 			/* Device Status */
   4626 			if (sc->sc_type == WM_T_ICH8) {
   4627 				reg = CSR_READ(sc, WMREG_STATUS);
   4628 				reg &= ~__BIT(31);
   4629 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4630 
   4631 			}
   4632 
   4633 			/* IOSFPC */
   4634 			if (sc->sc_type == WM_T_PCH_SPT) {
   4635 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4636 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4637 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4638 			}
   4639 			/*
    4640 			 * To work around a descriptor data corruption issue
    4641 			 * with NFS v2 UDP traffic, just disable the NFS
    4642 			 * filtering capability.
   4643 			 */
   4644 			reg = CSR_READ(sc, WMREG_RFCTL);
   4645 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4646 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4647 			break;
   4648 		default:
   4649 			break;
   4650 		}
   4651 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4652 
   4653 		switch (sc->sc_type) {
   4654 		/*
   4655 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4656 		 * Avoid RSS Hash Value bug.
   4657 		 */
   4658 		case WM_T_82571:
   4659 		case WM_T_82572:
   4660 		case WM_T_82573:
   4661 		case WM_T_80003:
   4662 		case WM_T_ICH8:
   4663 			reg = CSR_READ(sc, WMREG_RFCTL);
    4664 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4665 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4666 			break;
   4667 		case WM_T_82574:
    4668 			/* Use the extended Rx descriptor. */
   4669 			reg = CSR_READ(sc, WMREG_RFCTL);
   4670 			reg |= WMREG_RFCTL_EXSTEN;
   4671 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4672 			break;
   4673 		default:
   4674 			break;
   4675 		}
   4676 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4677 		/*
   4678 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4679 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4680 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4681 		 * Correctly by the Device"
   4682 		 *
   4683 		 * I354(C2000) Errata AVR53:
   4684 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4685 		 * Hang"
   4686 		 */
   4687 		reg = CSR_READ(sc, WMREG_RFCTL);
   4688 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4689 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4690 	}
   4691 }
   4692 
   4693 static uint32_t
   4694 wm_rxpbs_adjust_82580(uint32_t val)
   4695 {
   4696 	uint32_t rv = 0;
   4697 
   4698 	if (val < __arraycount(wm_82580_rxpbs_table))
   4699 		rv = wm_82580_rxpbs_table[val];
   4700 
   4701 	return rv;
   4702 }
   4703 
   4704 /*
   4705  * wm_reset_phy:
   4706  *
    4707  *	Generic PHY reset function.
    4708  *	Same as e1000_phy_hw_reset_generic().
   4709  */
   4710 static int
   4711 wm_reset_phy(struct wm_softc *sc)
   4712 {
   4713 	uint32_t reg;
   4714 
   4715 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4716 		device_xname(sc->sc_dev), __func__));
   4717 	if (wm_phy_resetisblocked(sc))
   4718 		return -1;
   4719 
   4720 	sc->phy.acquire(sc);
   4721 
   4722 	reg = CSR_READ(sc, WMREG_CTRL);
   4723 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4724 	CSR_WRITE_FLUSH(sc);
   4725 
   4726 	delay(sc->phy.reset_delay_us);
   4727 
   4728 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4729 	CSR_WRITE_FLUSH(sc);
   4730 
   4731 	delay(150);
   4732 
   4733 	sc->phy.release(sc);
   4734 
   4735 	wm_get_cfg_done(sc);
   4736 	wm_phy_post_reset(sc);
   4737 
   4738 	return 0;
   4739 }
   4740 
   4741 /*
    4742  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4743  * so it is enough to check sc->sc_queue[0].
   4744  */
   4745 static void
   4746 wm_flush_desc_rings(struct wm_softc *sc)
   4747 {
   4748 	pcireg_t preg;
   4749 	uint32_t reg;
   4750 	struct wm_txqueue *txq;
   4751 	wiseman_txdesc_t *txd;
   4752 	int nexttx;
   4753 	uint32_t rctl;
   4754 
   4755 	/* First, disable MULR fix in FEXTNVM11 */
   4756 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4757 	reg |= FEXTNVM11_DIS_MULRFIX;
   4758 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4759 
   4760 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4761 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4762 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4763 		return;
   4764 
   4765 	/* TX */
   4766 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4767 	    preg, reg);
   4768 	reg = CSR_READ(sc, WMREG_TCTL);
   4769 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4770 
   4771 	txq = &sc->sc_queue[0].wmq_txq;
   4772 	nexttx = txq->txq_next;
   4773 	txd = &txq->txq_descs[nexttx];
   4774 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4775 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4776 	txd->wtx_fields.wtxu_status = 0;
   4777 	txd->wtx_fields.wtxu_options = 0;
   4778 	txd->wtx_fields.wtxu_vlan = 0;
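
         	/*
         	 * The descriptor built above describes a dummy 512-byte
         	 * packet: in the legacy Tx descriptor the low bits of
         	 * wtx_cmdlen hold the buffer length, so WTX_CMD_IFCS | 512
         	 * requests a 512-byte transmit with Insert-FCS set.
         	 * Advancing TDT below makes the hardware fetch the
         	 * descriptor and drain the stuck ring.
         	 */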
   4779 
   4780 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4781 	    BUS_SPACE_BARRIER_WRITE);
   4782 
   4783 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4784 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4785 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4786 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4787 	delay(250);
   4788 
   4789 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4790 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4791 		return;
   4792 
   4793 	/* RX */
   4794 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4795 	rctl = CSR_READ(sc, WMREG_RCTL);
   4796 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4797 	CSR_WRITE_FLUSH(sc);
   4798 	delay(150);
   4799 
   4800 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4801 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4802 	reg &= 0xffffc000;
   4803 	/*
   4804 	 * Update thresholds: prefetch threshold to 31, host threshold
   4805 	 * to 1 and make sure the granularity is "descriptors" and not
   4806 	 * "cache lines"
   4807 	 */
   4808 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4809 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4810 
   4811 	/* Momentarily enable the RX ring for the changes to take effect */
   4812 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4813 	CSR_WRITE_FLUSH(sc);
   4814 	delay(150);
   4815 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4816 }
   4817 
   4818 /*
   4819  * wm_reset:
   4820  *
   4821  *	Reset the i82542 chip.
   4822  */
   4823 static void
   4824 wm_reset(struct wm_softc *sc)
   4825 {
   4826 	int phy_reset = 0;
   4827 	int i, error = 0;
   4828 	uint32_t reg;
   4829 	uint16_t kmreg;
   4830 	int rv;
   4831 
   4832 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4833 		device_xname(sc->sc_dev), __func__));
   4834 	KASSERT(sc->sc_type != 0);
   4835 
   4836 	/*
   4837 	 * Allocate on-chip memory according to the MTU size.
   4838 	 * The Packet Buffer Allocation register must be written
   4839 	 * before the chip is reset.
   4840 	 */
   4841 	switch (sc->sc_type) {
   4842 	case WM_T_82547:
   4843 	case WM_T_82547_2:
   4844 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4845 		    PBA_22K : PBA_30K;
   4846 		for (i = 0; i < sc->sc_nqueues; i++) {
   4847 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4848 			txq->txq_fifo_head = 0;
   4849 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4850 			txq->txq_fifo_size =
   4851 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4852 			txq->txq_fifo_stall = 0;
   4853 		}
   4854 		break;
   4855 	case WM_T_82571:
   4856 	case WM_T_82572:
    4857 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4858 	case WM_T_80003:
   4859 		sc->sc_pba = PBA_32K;
   4860 		break;
   4861 	case WM_T_82573:
   4862 		sc->sc_pba = PBA_12K;
   4863 		break;
   4864 	case WM_T_82574:
   4865 	case WM_T_82583:
   4866 		sc->sc_pba = PBA_20K;
   4867 		break;
   4868 	case WM_T_82576:
   4869 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4870 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4871 		break;
   4872 	case WM_T_82580:
   4873 	case WM_T_I350:
   4874 	case WM_T_I354:
   4875 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4876 		break;
   4877 	case WM_T_I210:
   4878 	case WM_T_I211:
   4879 		sc->sc_pba = PBA_34K;
   4880 		break;
   4881 	case WM_T_ICH8:
   4882 		/* Workaround for a bit corruption issue in FIFO memory */
   4883 		sc->sc_pba = PBA_8K;
   4884 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4885 		break;
   4886 	case WM_T_ICH9:
   4887 	case WM_T_ICH10:
   4888 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4889 		    PBA_14K : PBA_10K;
   4890 		break;
   4891 	case WM_T_PCH:
   4892 	case WM_T_PCH2:	/* XXX 14K? */
   4893 	case WM_T_PCH_LPT:
   4894 	case WM_T_PCH_SPT:
   4895 	case WM_T_PCH_CNP:
   4896 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   4897 		    PBA_12K : PBA_26K;
   4898 		break;
   4899 	default:
   4900 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4901 		    PBA_40K : PBA_48K;
   4902 		break;
   4903 	}
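
         	/*
         	 * A worked example of the split above: on the 82547 with a
         	 * standard MTU, sc_pba = PBA_30K reserves 30KB of the 40KB
         	 * on-chip packet buffer for receive, the Tx FIFO starts at
         	 * sc_pba << PBA_ADDR_SHIFT, and its size is
         	 * (PBA_40K - sc_pba) << PBA_BYTE_SHIFT, i.e. the remaining
         	 * 10KB.
         	 */
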
   4904 	/*
   4905 	 * Only old or non-multiqueue devices have the PBA register
   4906 	 * XXX Need special handling for 82575.
   4907 	 */
   4908 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4909 	    || (sc->sc_type == WM_T_82575))
   4910 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4911 
   4912 	/* Prevent the PCI-E bus from sticking */
   4913 	if (sc->sc_flags & WM_F_PCIE) {
   4914 		int timeout = 800;
   4915 
   4916 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4917 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4918 
   4919 		while (timeout--) {
   4920 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4921 			    == 0)
   4922 				break;
   4923 			delay(100);
   4924 		}
   4925 		if (timeout == 0)
   4926 			device_printf(sc->sc_dev,
   4927 			    "failed to disable busmastering\n");
   4928 	}
   4929 
   4930 	/* Set the completion timeout for interface */
   4931 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4932 	    || (sc->sc_type == WM_T_82580)
   4933 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4934 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4935 		wm_set_pcie_completion_timeout(sc);
   4936 
   4937 	/* Clear interrupt */
   4938 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4939 	if (wm_is_using_msix(sc)) {
   4940 		if (sc->sc_type != WM_T_82574) {
   4941 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4942 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4943 		} else
   4944 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4945 	}
   4946 
   4947 	/* Stop the transmit and receive processes. */
   4948 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4949 	sc->sc_rctl &= ~RCTL_EN;
   4950 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4951 	CSR_WRITE_FLUSH(sc);
   4952 
   4953 	/* XXX set_tbi_sbp_82543() */
   4954 
   4955 	delay(10*1000);
   4956 
   4957 	/* Must acquire the MDIO ownership before MAC reset */
   4958 	switch (sc->sc_type) {
   4959 	case WM_T_82573:
   4960 	case WM_T_82574:
   4961 	case WM_T_82583:
   4962 		error = wm_get_hw_semaphore_82573(sc);
   4963 		break;
   4964 	default:
   4965 		break;
   4966 	}
   4967 
   4968 	/*
   4969 	 * 82541 Errata 29? & 82547 Errata 28?
   4970 	 * See also the description about PHY_RST bit in CTRL register
   4971 	 * in 8254x_GBe_SDM.pdf.
   4972 	 */
   4973 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4974 		CSR_WRITE(sc, WMREG_CTRL,
   4975 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4976 		CSR_WRITE_FLUSH(sc);
   4977 		delay(5000);
   4978 	}
   4979 
   4980 	switch (sc->sc_type) {
   4981 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4982 	case WM_T_82541:
   4983 	case WM_T_82541_2:
   4984 	case WM_T_82547:
   4985 	case WM_T_82547_2:
   4986 		/*
   4987 		 * On some chipsets, a reset through a memory-mapped write
   4988 		 * cycle can cause the chip to reset before completing the
    4989 		 * write cycle. This causes a major headache that can be avoided
   4990 		 * by issuing the reset via indirect register writes through
   4991 		 * I/O space.
   4992 		 *
   4993 		 * So, if we successfully mapped the I/O BAR at attach time,
   4994 		 * use that. Otherwise, try our luck with a memory-mapped
   4995 		 * reset.
   4996 		 */
   4997 		if (sc->sc_flags & WM_F_IOH_VALID)
   4998 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4999 		else
   5000 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5001 		break;
   5002 	case WM_T_82545_3:
   5003 	case WM_T_82546_3:
   5004 		/* Use the shadow control register on these chips. */
   5005 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5006 		break;
   5007 	case WM_T_80003:
   5008 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5009 		sc->phy.acquire(sc);
   5010 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5011 		sc->phy.release(sc);
   5012 		break;
   5013 	case WM_T_ICH8:
   5014 	case WM_T_ICH9:
   5015 	case WM_T_ICH10:
   5016 	case WM_T_PCH:
   5017 	case WM_T_PCH2:
   5018 	case WM_T_PCH_LPT:
   5019 	case WM_T_PCH_SPT:
   5020 	case WM_T_PCH_CNP:
   5021 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5022 		if (wm_phy_resetisblocked(sc) == false) {
   5023 			/*
   5024 			 * Gate automatic PHY configuration by hardware on
   5025 			 * non-managed 82579
   5026 			 */
   5027 			if ((sc->sc_type == WM_T_PCH2)
   5028 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5029 				== 0))
   5030 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5031 
   5032 			reg |= CTRL_PHY_RESET;
   5033 			phy_reset = 1;
   5034 		} else
   5035 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5036 		sc->phy.acquire(sc);
   5037 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5038 		/* Don't insert a completion barrier during reset */
   5039 		delay(20*1000);
   5040 		mutex_exit(sc->sc_ich_phymtx);
   5041 		break;
   5042 	case WM_T_82580:
   5043 	case WM_T_I350:
   5044 	case WM_T_I354:
   5045 	case WM_T_I210:
   5046 	case WM_T_I211:
   5047 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5048 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5049 			CSR_WRITE_FLUSH(sc);
   5050 		delay(5000);
   5051 		break;
   5052 	case WM_T_82542_2_0:
   5053 	case WM_T_82542_2_1:
   5054 	case WM_T_82543:
   5055 	case WM_T_82540:
   5056 	case WM_T_82545:
   5057 	case WM_T_82546:
   5058 	case WM_T_82571:
   5059 	case WM_T_82572:
   5060 	case WM_T_82573:
   5061 	case WM_T_82574:
   5062 	case WM_T_82575:
   5063 	case WM_T_82576:
   5064 	case WM_T_82583:
   5065 	default:
   5066 		/* Everything else can safely use the documented method. */
   5067 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5068 		break;
   5069 	}
   5070 
   5071 	/* Must release the MDIO ownership after MAC reset */
   5072 	switch (sc->sc_type) {
   5073 	case WM_T_82573:
   5074 	case WM_T_82574:
   5075 	case WM_T_82583:
   5076 		if (error == 0)
   5077 			wm_put_hw_semaphore_82573(sc);
   5078 		break;
   5079 	default:
   5080 		break;
   5081 	}
   5082 
   5083 	/* Set Phy Config Counter to 50msec */
   5084 	if (sc->sc_type == WM_T_PCH2) {
   5085 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5086 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5087 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5088 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5089 	}
   5090 
   5091 	if (phy_reset != 0)
   5092 		wm_get_cfg_done(sc);
   5093 
   5094 	/* Reload EEPROM */
   5095 	switch (sc->sc_type) {
   5096 	case WM_T_82542_2_0:
   5097 	case WM_T_82542_2_1:
   5098 	case WM_T_82543:
   5099 	case WM_T_82544:
   5100 		delay(10);
   5101 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5102 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5103 		CSR_WRITE_FLUSH(sc);
   5104 		delay(2000);
   5105 		break;
   5106 	case WM_T_82540:
   5107 	case WM_T_82545:
   5108 	case WM_T_82545_3:
   5109 	case WM_T_82546:
   5110 	case WM_T_82546_3:
   5111 		delay(5*1000);
   5112 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5113 		break;
   5114 	case WM_T_82541:
   5115 	case WM_T_82541_2:
   5116 	case WM_T_82547:
   5117 	case WM_T_82547_2:
   5118 		delay(20000);
   5119 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5120 		break;
   5121 	case WM_T_82571:
   5122 	case WM_T_82572:
   5123 	case WM_T_82573:
   5124 	case WM_T_82574:
   5125 	case WM_T_82583:
   5126 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5127 			delay(10);
   5128 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5129 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5130 			CSR_WRITE_FLUSH(sc);
   5131 		}
   5132 		/* check EECD_EE_AUTORD */
   5133 		wm_get_auto_rd_done(sc);
   5134 		/*
    5135 		 * PHY configuration from the NVM starts only after
    5136 		 * EECD_AUTO_RD is set.
   5137 		 */
   5138 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5139 		    || (sc->sc_type == WM_T_82583))
   5140 			delay(25*1000);
   5141 		break;
   5142 	case WM_T_82575:
   5143 	case WM_T_82576:
   5144 	case WM_T_82580:
   5145 	case WM_T_I350:
   5146 	case WM_T_I354:
   5147 	case WM_T_I210:
   5148 	case WM_T_I211:
   5149 	case WM_T_80003:
   5150 		/* check EECD_EE_AUTORD */
   5151 		wm_get_auto_rd_done(sc);
   5152 		break;
   5153 	case WM_T_ICH8:
   5154 	case WM_T_ICH9:
   5155 	case WM_T_ICH10:
   5156 	case WM_T_PCH:
   5157 	case WM_T_PCH2:
   5158 	case WM_T_PCH_LPT:
   5159 	case WM_T_PCH_SPT:
   5160 	case WM_T_PCH_CNP:
   5161 		break;
   5162 	default:
   5163 		panic("%s: unknown type\n", __func__);
   5164 	}
   5165 
   5166 	/* Check whether EEPROM is present or not */
   5167 	switch (sc->sc_type) {
   5168 	case WM_T_82575:
   5169 	case WM_T_82576:
   5170 	case WM_T_82580:
   5171 	case WM_T_I350:
   5172 	case WM_T_I354:
   5173 	case WM_T_ICH8:
   5174 	case WM_T_ICH9:
   5175 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5176 			/* Not found */
   5177 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5178 			if (sc->sc_type == WM_T_82575)
   5179 				wm_reset_init_script_82575(sc);
   5180 		}
   5181 		break;
   5182 	default:
   5183 		break;
   5184 	}
   5185 
   5186 	if (phy_reset != 0)
   5187 		wm_phy_post_reset(sc);
   5188 
   5189 	if ((sc->sc_type == WM_T_82580)
   5190 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5191 		/* Clear global device reset status bit */
   5192 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5193 	}
   5194 
   5195 	/* Clear any pending interrupt events. */
   5196 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5197 	reg = CSR_READ(sc, WMREG_ICR);
   5198 	if (wm_is_using_msix(sc)) {
   5199 		if (sc->sc_type != WM_T_82574) {
   5200 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5201 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5202 		} else
   5203 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5204 	}
   5205 
   5206 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5207 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5208 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5209 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5210 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5211 		reg |= KABGTXD_BGSQLBIAS;
   5212 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5213 	}
   5214 
   5215 	/* Reload sc_ctrl */
   5216 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5217 
   5218 	wm_set_eee(sc);
   5219 
   5220 	/*
   5221 	 * For PCH, this write will make sure that any noise will be detected
   5222 	 * as a CRC error and be dropped rather than show up as a bad packet
    5223 	 * to the DMA engine.
   5224 	 */
   5225 	if (sc->sc_type == WM_T_PCH)
   5226 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5227 
   5228 	if (sc->sc_type >= WM_T_82544)
   5229 		CSR_WRITE(sc, WMREG_WUC, 0);
   5230 
   5231 	if (sc->sc_type < WM_T_82575)
   5232 		wm_disable_aspm(sc); /* Workaround for some chips */
   5233 
   5234 	wm_reset_mdicnfg_82580(sc);
   5235 
   5236 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5237 		wm_pll_workaround_i210(sc);
   5238 
   5239 	if (sc->sc_type == WM_T_80003) {
   5240 		/* Default to TRUE to enable the MDIC W/A */
   5241 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5242 
   5243 		rv = wm_kmrn_readreg(sc,
   5244 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5245 		if (rv == 0) {
   5246 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5247 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5248 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5249 			else
   5250 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5251 		}
   5252 	}
   5253 }
   5254 
   5255 /*
   5256  * wm_add_rxbuf:
   5257  *
    5258  *	Add a receive buffer to the indicated descriptor.
   5259  */
   5260 static int
   5261 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5262 {
   5263 	struct wm_softc *sc = rxq->rxq_sc;
   5264 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5265 	struct mbuf *m;
   5266 	int error;
   5267 
   5268 	KASSERT(mutex_owned(rxq->rxq_lock));
   5269 
   5270 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5271 	if (m == NULL)
   5272 		return ENOBUFS;
   5273 
   5274 	MCLGET(m, M_DONTWAIT);
   5275 	if ((m->m_flags & M_EXT) == 0) {
   5276 		m_freem(m);
   5277 		return ENOBUFS;
   5278 	}
   5279 
   5280 	if (rxs->rxs_mbuf != NULL)
   5281 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5282 
   5283 	rxs->rxs_mbuf = m;
   5284 
   5285 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5286 	/*
   5287 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5288 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5289 	 */
   5290 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5291 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5292 	if (error) {
   5293 		/* XXX XXX XXX */
   5294 		aprint_error_dev(sc->sc_dev,
   5295 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5296 		panic("wm_add_rxbuf");
   5297 	}
   5298 
   5299 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5300 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5301 
   5302 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5303 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5304 			wm_init_rxdesc(rxq, idx);
   5305 	} else
   5306 		wm_init_rxdesc(rxq, idx);
   5307 
   5308 	return 0;
   5309 }
   5310 
   5311 /*
   5312  * wm_rxdrain:
   5313  *
   5314  *	Drain the receive queue.
   5315  */
   5316 static void
   5317 wm_rxdrain(struct wm_rxqueue *rxq)
   5318 {
   5319 	struct wm_softc *sc = rxq->rxq_sc;
   5320 	struct wm_rxsoft *rxs;
   5321 	int i;
   5322 
   5323 	KASSERT(mutex_owned(rxq->rxq_lock));
   5324 
   5325 	for (i = 0; i < WM_NRXDESC; i++) {
   5326 		rxs = &rxq->rxq_soft[i];
   5327 		if (rxs->rxs_mbuf != NULL) {
   5328 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5329 			m_freem(rxs->rxs_mbuf);
   5330 			rxs->rxs_mbuf = NULL;
   5331 		}
   5332 	}
   5333 }
   5334 
   5335 /*
    5336  * Set up registers for RSS.
    5337  *
    5338  * XXX VMDq is not supported yet.
   5339  */
   5340 static void
   5341 wm_init_rss(struct wm_softc *sc)
   5342 {
   5343 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5344 	int i;
   5345 
   5346 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5347 
   5348 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5349 		unsigned int qid, reta_ent;
   5350 
   5351 		qid  = i % sc->sc_nqueues;
   5352 		switch (sc->sc_type) {
   5353 		case WM_T_82574:
   5354 			reta_ent = __SHIFTIN(qid,
   5355 			    RETA_ENT_QINDEX_MASK_82574);
   5356 			break;
   5357 		case WM_T_82575:
   5358 			reta_ent = __SHIFTIN(qid,
   5359 			    RETA_ENT_QINDEX1_MASK_82575);
   5360 			break;
   5361 		default:
   5362 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5363 			break;
   5364 		}
   5365 
   5366 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5367 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5368 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5369 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5370 	}
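
         	/*
         	 * A sketch of how the hardware is expected to use the table
         	 * programmed above: the device hashes each packet's
         	 * address/port tuple with the key from the RSSRK registers,
         	 * indexes the RETA_NUM_ENTRIES-entry redirection table with
         	 * the low bits of the 32-bit hash, and steers the packet to
         	 * the queue stored in that entry -- here simply
         	 * i % sc_nqueues.
         	 */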
   5371 
   5372 	rss_getkey((uint8_t *)rss_key);
   5373 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5374 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5375 
   5376 	if (sc->sc_type == WM_T_82574)
   5377 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5378 	else
   5379 		mrqc = MRQC_ENABLE_RSS_MQ;
   5380 
   5381 	/*
   5382 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5383 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5384 	 */
   5385 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5386 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5387 #if 0
   5388 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5389 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5390 #endif
   5391 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5392 
   5393 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5394 }
   5395 
   5396 /*
    5397  * Adjust the TX and RX queue numbers that the system actually uses.
    5398  *
    5399  * The numbers are affected by the following parameters:
    5400  *     - The number of hardware queues
   5401  *     - The number of MSI-X vectors (= "nvectors" argument)
   5402  *     - ncpu
   5403  */
   5404 static void
   5405 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5406 {
   5407 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5408 
   5409 	if (nvectors < 2) {
   5410 		sc->sc_nqueues = 1;
   5411 		return;
   5412 	}
   5413 
   5414 	switch (sc->sc_type) {
   5415 	case WM_T_82572:
   5416 		hw_ntxqueues = 2;
   5417 		hw_nrxqueues = 2;
   5418 		break;
   5419 	case WM_T_82574:
   5420 		hw_ntxqueues = 2;
   5421 		hw_nrxqueues = 2;
   5422 		break;
   5423 	case WM_T_82575:
   5424 		hw_ntxqueues = 4;
   5425 		hw_nrxqueues = 4;
   5426 		break;
   5427 	case WM_T_82576:
   5428 		hw_ntxqueues = 16;
   5429 		hw_nrxqueues = 16;
   5430 		break;
   5431 	case WM_T_82580:
   5432 	case WM_T_I350:
   5433 	case WM_T_I354:
   5434 		hw_ntxqueues = 8;
   5435 		hw_nrxqueues = 8;
   5436 		break;
   5437 	case WM_T_I210:
   5438 		hw_ntxqueues = 4;
   5439 		hw_nrxqueues = 4;
   5440 		break;
   5441 	case WM_T_I211:
   5442 		hw_ntxqueues = 2;
   5443 		hw_nrxqueues = 2;
   5444 		break;
   5445 		/*
    5446 		 * As the following Ethernet controllers do not support
    5447 		 * MSI-X, this driver does not use multiqueue on them:
   5448 		 *     - WM_T_80003
   5449 		 *     - WM_T_ICH8
   5450 		 *     - WM_T_ICH9
   5451 		 *     - WM_T_ICH10
   5452 		 *     - WM_T_PCH
   5453 		 *     - WM_T_PCH2
   5454 		 *     - WM_T_PCH_LPT
   5455 		 */
   5456 	default:
   5457 		hw_ntxqueues = 1;
   5458 		hw_nrxqueues = 1;
   5459 		break;
   5460 	}
   5461 
   5462 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5463 
   5464 	/*
    5465 	 * Since using more queues than MSI-X vectors cannot improve
    5466 	 * scaling, limit the number of queues actually used.
   5467 	 */
   5468 	if (nvectors < hw_nqueues + 1)
   5469 		sc->sc_nqueues = nvectors - 1;
   5470 	else
   5471 		sc->sc_nqueues = hw_nqueues;
   5472 
   5473 	/*
    5474 	 * Since using more queues than CPUs cannot improve scaling,
    5475 	 * limit the number of queues actually used.
   5476 	 */
   5477 	if (ncpu < sc->sc_nqueues)
   5478 		sc->sc_nqueues = ncpu;
   5479 }
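
         /*
          * For example (a hypothetical configuration): an 82576 has 16
          * hardware queue pairs, so with nvectors = 5 and ncpu = 4 the
          * vector limit gives sc_nqueues = nvectors - 1 = 4, the CPU limit
          * leaves it at 4, and four Tx/Rx queue pairs are used while one
          * vector remains for the link interrupt.
          */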
   5480 
   5481 static inline bool
   5482 wm_is_using_msix(struct wm_softc *sc)
   5483 {
   5484 
   5485 	return (sc->sc_nintrs > 1);
   5486 }
   5487 
   5488 static inline bool
   5489 wm_is_using_multiqueue(struct wm_softc *sc)
   5490 {
   5491 
   5492 	return (sc->sc_nqueues > 1);
   5493 }
   5494 
   5495 static int
   5496 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5497 {
   5498 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5499 
   5500 	wmq->wmq_id = qidx;
   5501 	wmq->wmq_intr_idx = intr_idx;
   5502 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5503 	    wm_handle_queue, wmq);
   5504 	if (wmq->wmq_si != NULL)
   5505 		return 0;
   5506 
   5507 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5508 	    wmq->wmq_id);
   5509 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5510 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5511 	return ENOMEM;
   5512 }
   5513 
   5514 /*
    5515  * Both single-interrupt MSI and INTx can use this function.
   5516  */
   5517 static int
   5518 wm_setup_legacy(struct wm_softc *sc)
   5519 {
   5520 	pci_chipset_tag_t pc = sc->sc_pc;
   5521 	const char *intrstr = NULL;
   5522 	char intrbuf[PCI_INTRSTR_LEN];
   5523 	int error;
   5524 
   5525 	error = wm_alloc_txrx_queues(sc);
   5526 	if (error) {
   5527 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5528 		    error);
   5529 		return ENOMEM;
   5530 	}
   5531 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5532 	    sizeof(intrbuf));
   5533 #ifdef WM_MPSAFE
   5534 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5535 #endif
   5536 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5537 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5538 	if (sc->sc_ihs[0] == NULL) {
   5539 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5540 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5541 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5542 		return ENOMEM;
   5543 	}
   5544 
   5545 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5546 	sc->sc_nintrs = 1;
   5547 
   5548 	return wm_softint_establish_queue(sc, 0, 0);
   5549 }
   5550 
   5551 static int
   5552 wm_setup_msix(struct wm_softc *sc)
   5553 {
   5554 	void *vih;
   5555 	kcpuset_t *affinity;
   5556 	int qidx, error, intr_idx, txrx_established;
   5557 	pci_chipset_tag_t pc = sc->sc_pc;
   5558 	const char *intrstr = NULL;
   5559 	char intrbuf[PCI_INTRSTR_LEN];
   5560 	char intr_xname[INTRDEVNAMEBUF];
   5561 
   5562 	if (sc->sc_nqueues < ncpu) {
   5563 		/*
    5564 		 * To avoid other devices' interrupts, the affinity of the
    5565 		 * Tx/Rx interrupts starts from CPU#1.
   5566 		 */
   5567 		sc->sc_affinity_offset = 1;
   5568 	} else {
   5569 		/*
    5570 		 * In this case, this device uses all CPUs, so we unify the
    5571 		 * affinity cpu_index with the MSI-X vector number for readability.
   5572 		 */
   5573 		sc->sc_affinity_offset = 0;
   5574 	}
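
         	/*
         	 * For example (hypothetical numbers): with
         	 * sc_affinity_offset = 1 and four queues on an eight-CPU
         	 * system, the Tx/Rx vectors established below are bound
         	 * round-robin to CPU1..CPU4 by interrupt_distribute(),
         	 * leaving CPU0 for other devices' interrupts.
         	 */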
   5575 
   5576 	error = wm_alloc_txrx_queues(sc);
   5577 	if (error) {
   5578 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5579 		    error);
   5580 		return ENOMEM;
   5581 	}
   5582 
   5583 	kcpuset_create(&affinity, false);
   5584 	intr_idx = 0;
   5585 
   5586 	/*
   5587 	 * TX and RX
   5588 	 */
   5589 	txrx_established = 0;
   5590 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5591 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5592 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5593 
   5594 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5595 		    sizeof(intrbuf));
   5596 #ifdef WM_MPSAFE
   5597 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5598 		    PCI_INTR_MPSAFE, true);
   5599 #endif
   5600 		memset(intr_xname, 0, sizeof(intr_xname));
   5601 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5602 		    device_xname(sc->sc_dev), qidx);
   5603 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5604 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5605 		if (vih == NULL) {
   5606 			aprint_error_dev(sc->sc_dev,
   5607 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5608 			    intrstr ? " at " : "",
   5609 			    intrstr ? intrstr : "");
   5610 
   5611 			goto fail;
   5612 		}
   5613 		kcpuset_zero(affinity);
   5614 		/* Round-robin affinity */
   5615 		kcpuset_set(affinity, affinity_to);
   5616 		error = interrupt_distribute(vih, affinity, NULL);
   5617 		if (error == 0) {
   5618 			aprint_normal_dev(sc->sc_dev,
   5619 			    "for TX and RX interrupting at %s affinity to %u\n",
   5620 			    intrstr, affinity_to);
   5621 		} else {
   5622 			aprint_normal_dev(sc->sc_dev,
   5623 			    "for TX and RX interrupting at %s\n", intrstr);
   5624 		}
   5625 		sc->sc_ihs[intr_idx] = vih;
   5626 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5627 			goto fail;
   5628 		txrx_established++;
   5629 		intr_idx++;
   5630 	}
   5631 
   5632 	/* LINK */
   5633 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5634 	    sizeof(intrbuf));
   5635 #ifdef WM_MPSAFE
   5636 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5637 #endif
   5638 	memset(intr_xname, 0, sizeof(intr_xname));
   5639 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5640 	    device_xname(sc->sc_dev));
   5641 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5642 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5643 	if (vih == NULL) {
   5644 		aprint_error_dev(sc->sc_dev,
   5645 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5646 		    intrstr ? " at " : "",
   5647 		    intrstr ? intrstr : "");
   5648 
   5649 		goto fail;
   5650 	}
    5651 	/* Keep the default affinity for the LINK interrupt */
   5652 	aprint_normal_dev(sc->sc_dev,
   5653 	    "for LINK interrupting at %s\n", intrstr);
   5654 	sc->sc_ihs[intr_idx] = vih;
   5655 	sc->sc_link_intr_idx = intr_idx;
   5656 
   5657 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5658 	kcpuset_destroy(affinity);
   5659 	return 0;
   5660 
   5661  fail:
   5662 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5663 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5664 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5665 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5666 	}
   5667 
   5668 	kcpuset_destroy(affinity);
   5669 	return ENOMEM;
   5670 }
   5671 
   5672 static void
   5673 wm_unset_stopping_flags(struct wm_softc *sc)
   5674 {
   5675 	int i;
   5676 
   5677 	KASSERT(WM_CORE_LOCKED(sc));
   5678 
   5679 	/* Must unset stopping flags in ascending order. */
   5680 	for (i = 0; i < sc->sc_nqueues; i++) {
   5681 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5682 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5683 
   5684 		mutex_enter(txq->txq_lock);
   5685 		txq->txq_stopping = false;
   5686 		mutex_exit(txq->txq_lock);
   5687 
   5688 		mutex_enter(rxq->rxq_lock);
   5689 		rxq->rxq_stopping = false;
   5690 		mutex_exit(rxq->rxq_lock);
   5691 	}
   5692 
   5693 	sc->sc_core_stopping = false;
   5694 }
   5695 
   5696 static void
   5697 wm_set_stopping_flags(struct wm_softc *sc)
   5698 {
   5699 	int i;
   5700 
   5701 	KASSERT(WM_CORE_LOCKED(sc));
   5702 
   5703 	sc->sc_core_stopping = true;
   5704 
   5705 	/* Must set stopping flags in ascending order. */
   5706 	for (i = 0; i < sc->sc_nqueues; i++) {
   5707 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5708 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5709 
   5710 		mutex_enter(rxq->rxq_lock);
   5711 		rxq->rxq_stopping = true;
   5712 		mutex_exit(rxq->rxq_lock);
   5713 
   5714 		mutex_enter(txq->txq_lock);
   5715 		txq->txq_stopping = true;
   5716 		mutex_exit(txq->txq_lock);
   5717 	}
   5718 }
   5719 
   5720 /*
    5721  * Write the interrupt interval value to the ITR or EITR register.
   5722  */
   5723 static void
   5724 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5725 {
   5726 
   5727 	if (!wmq->wmq_set_itr)
   5728 		return;
   5729 
   5730 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5731 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5732 
   5733 		/*
    5734 		 * The 82575 doesn't have the CNT_INGR field,
    5735 		 * so overwrite the counter field in software.
   5736 		 */
   5737 		if (sc->sc_type == WM_T_82575)
   5738 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5739 		else
   5740 			eitr |= EITR_CNT_INGR;
   5741 
   5742 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5743 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5744 		/*
    5745 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5746 		 * the multiqueue function with MSI-X.
   5747 		 */
   5748 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5749 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5750 	} else {
   5751 		KASSERT(wmq->wmq_id == 0);
   5752 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5753 	}
   5754 
   5755 	wmq->wmq_set_itr = false;
   5756 }
   5757 
   5758 /*
   5759  * TODO
    5760  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5761  * however, it does not fit wm(4), so AIM is disabled until we find an
    5762  * appropriate ITR calculation.
   5763  */
   5764 /*
    5765  * Calculate the interrupt interval value to be written to the register
    5766  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5767  */
   5768 static void
   5769 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5770 {
   5771 #ifdef NOTYET
   5772 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5773 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5774 	uint32_t avg_size = 0;
   5775 	uint32_t new_itr;
   5776 
   5777 	if (rxq->rxq_packets)
   5778 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5779 	if (txq->txq_packets)
   5780 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5781 
   5782 	if (avg_size == 0) {
   5783 		new_itr = 450; /* restore default value */
   5784 		goto out;
   5785 	}
   5786 
   5787 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5788 	avg_size += 24;
   5789 
   5790 	/* Don't starve jumbo frames */
   5791 	avg_size = uimin(avg_size, 3000);
   5792 
   5793 	/* Give a little boost to mid-size frames */
   5794 	if ((avg_size > 300) && (avg_size < 1200))
   5795 		new_itr = avg_size / 3;
   5796 	else
   5797 		new_itr = avg_size / 2;
   5798 
   5799 out:
   5800 	/*
    5801 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5802 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5803 	 */
   5804 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5805 		new_itr *= 4;
   5806 
   5807 	if (new_itr != wmq->wmq_itr) {
   5808 		wmq->wmq_itr = new_itr;
   5809 		wmq->wmq_set_itr = true;
   5810 	} else
   5811 		wmq->wmq_set_itr = false;
   5812 
   5813 	rxq->rxq_packets = 0;
   5814 	rxq->rxq_bytes = 0;
   5815 	txq->txq_packets = 0;
   5816 	txq->txq_bytes = 0;
   5817 #endif
   5818 }
   5819 
   5820 static void
   5821 wm_init_sysctls(struct wm_softc *sc)
   5822 {
   5823 	struct sysctllog **log;
   5824 	const struct sysctlnode *rnode, *cnode;
   5825 	int rv;
   5826 	const char *dvname;
   5827 
   5828 	log = &sc->sc_sysctllog;
   5829 	dvname = device_xname(sc->sc_dev);
   5830 
   5831 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5832 	    0, CTLTYPE_NODE, dvname,
   5833 	    SYSCTL_DESCR("wm information and settings"),
   5834 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5835 	if (rv != 0)
   5836 		goto err;
   5837 
   5838 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5839 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5840 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5841 	if (rv != 0)
   5842 		goto teardown;
   5843 
   5844 #ifdef WM_DEBUG
   5845 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5846 	    CTLTYPE_INT, "debug_flags",
   5847 	    SYSCTL_DESCR(
   5848 		    "Debug flags:\n"	\
   5849 		    "\t0x01 LINK\n"	\
   5850 		    "\t0x02 TX\n"	\
   5851 		    "\t0x04 RX\n"	\
   5852 		    "\t0x08 GMII\n"	\
   5853 		    "\t0x10 MANAGE\n"	\
   5854 		    "\t0x20 NVM\n"	\
   5855 		    "\t0x40 INIT\n"	\
   5856 		    "\t0x80 LOCK"),
   5857 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5858 	if (rv != 0)
   5859 		goto teardown;
   5860 #endif
   5861 
   5862 	return;
   5863 
   5864 teardown:
   5865 	sysctl_teardown(log);
   5866 err:
   5867 	sc->sc_sysctllog = NULL;
   5868 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5869 	    __func__, rv);
   5870 }
   5871 
   5872 /*
   5873  * wm_init:		[ifnet interface function]
   5874  *
   5875  *	Initialize the interface.
   5876  */
   5877 static int
   5878 wm_init(struct ifnet *ifp)
   5879 {
   5880 	struct wm_softc *sc = ifp->if_softc;
   5881 	int ret;
   5882 
   5883 	WM_CORE_LOCK(sc);
   5884 	ret = wm_init_locked(ifp);
   5885 	WM_CORE_UNLOCK(sc);
   5886 
   5887 	return ret;
   5888 }
   5889 
   5890 static int
   5891 wm_init_locked(struct ifnet *ifp)
   5892 {
   5893 	struct wm_softc *sc = ifp->if_softc;
   5894 	struct ethercom *ec = &sc->sc_ethercom;
   5895 	int i, j, trynum, error = 0;
   5896 	uint32_t reg, sfp_mask = 0;
   5897 
   5898 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5899 		device_xname(sc->sc_dev), __func__));
   5900 	KASSERT(WM_CORE_LOCKED(sc));
   5901 
   5902 	/*
    5903 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5904 	 * There is a small but measurable benefit to avoiding the adjustment
   5905 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5906 	 * on such platforms.  One possibility is that the DMA itself is
   5907 	 * slightly more efficient if the front of the entire packet (instead
   5908 	 * of the front of the headers) is aligned.
   5909 	 *
   5910 	 * Note we must always set align_tweak to 0 if we are using
   5911 	 * jumbo frames.
   5912 	 */
   5913 #ifdef __NO_STRICT_ALIGNMENT
   5914 	sc->sc_align_tweak = 0;
   5915 #else
   5916 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5917 		sc->sc_align_tweak = 0;
   5918 	else
   5919 		sc->sc_align_tweak = 2;
   5920 #endif /* __NO_STRICT_ALIGNMENT */
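
         	/*
         	 * The intent of the two-byte tweak: receive buffers are
         	 * offset so the 14-byte Ethernet header leaves the following
         	 * IP header 4-byte aligned, which strict-alignment platforms
         	 * need for safe header access; a full-size cluster holding a
         	 * jumbo frame leaves no room for the offset, hence 0.
         	 */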
   5921 
   5922 	/* Cancel any pending I/O. */
   5923 	wm_stop_locked(ifp, false, false);
   5924 
   5925 	/* Update statistics before reset */
   5926 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5927 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5928 
   5929 	/* PCH_SPT hardware workaround */
   5930 	if (sc->sc_type == WM_T_PCH_SPT)
   5931 		wm_flush_desc_rings(sc);
   5932 
   5933 	/* Reset the chip to a known state. */
   5934 	wm_reset(sc);
   5935 
   5936 	/*
    5937 	 * AMT-based hardware can now take control from the firmware.
   5938 	 * Do this after reset.
   5939 	 */
   5940 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5941 		wm_get_hw_control(sc);
   5942 
   5943 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5944 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5945 		wm_legacy_irq_quirk_spt(sc);
   5946 
   5947 	/* Init hardware bits */
   5948 	wm_initialize_hardware_bits(sc);
   5949 
   5950 	/* Reset the PHY. */
   5951 	if (sc->sc_flags & WM_F_HAS_MII)
   5952 		wm_gmii_reset(sc);
   5953 
   5954 	if (sc->sc_type >= WM_T_ICH8) {
   5955 		reg = CSR_READ(sc, WMREG_GCR);
   5956 		/*
    5957 		 * The ICH8 no-snoop bits have the opposite polarity. Set them
    5958 		 * to snoop by default after reset.
   5959 		 */
   5960 		if (sc->sc_type == WM_T_ICH8)
   5961 			reg |= GCR_NO_SNOOP_ALL;
   5962 		else
   5963 			reg &= ~GCR_NO_SNOOP_ALL;
   5964 		CSR_WRITE(sc, WMREG_GCR, reg);
   5965 	}
   5966 
   5967 	if ((sc->sc_type >= WM_T_ICH8)
   5968 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5969 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5970 
   5971 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5972 		reg |= CTRL_EXT_RO_DIS;
   5973 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5974 	}
   5975 
   5976 	/* Calculate (E)ITR value */
   5977 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5978 		/*
   5979 		 * For NEWQUEUE's EITR (except for 82575).
    5980 		 * The 82575's EITR should be set to the same throttling value
    5981 		 * as other old controllers' ITR because the interrupt/sec
    5982 		 * calculation is the same: 1,000,000,000 / (N * 256).
    5983 		 *
    5984 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5985 		 *
    5986 		 * For N interrupts/sec, set this value to:
    5987 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5988 		 */
   5989 		sc->sc_itr_init = 450;
   5990 	} else if (sc->sc_type >= WM_T_82543) {
   5991 		/*
   5992 		 * Set up the interrupt throttling register (units of 256ns)
   5993 		 * Note that a footnote in Intel's documentation says this
   5994 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5995 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5996 		 * that that is also true for the 1024ns units of the other
   5997 		 * interrupt-related timer registers -- so, really, we ought
   5998 		 * to divide this value by 4 when the link speed is low.
   5999 		 *
   6000 		 * XXX implement this division at link speed change!
   6001 		 */
   6002 
   6003 		/*
   6004 		 * For N interrupts/sec, set this value to:
   6005 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6006 		 * absolute and packet timer values to this value
   6007 		 * divided by 4 to get "simple timer" behavior.
   6008 		 */
   6009 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6010 	}
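
         	/*
         	 * To check the arithmetic above: with ITR units of 256ns, an
         	 * init value of 1500 gives 1,000,000,000 / (1500 * 256) ~= 2604
         	 * interrupts/sec, matching the comment, while the NEWQUEUE EITR
         	 * value of 450 gives roughly 1,000,000 / 450 ~= 2222
         	 * interrupts/sec, a comparable throttling rate.
         	 */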
   6011 
   6012 	error = wm_init_txrx_queues(sc);
   6013 	if (error)
   6014 		goto out;
   6015 
   6016 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6017 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6018 	    (sc->sc_type >= WM_T_82575))
   6019 		wm_serdes_power_up_link_82575(sc);
   6020 
   6021 	/* Clear out the VLAN table -- we don't use it (yet). */
   6022 	CSR_WRITE(sc, WMREG_VET, 0);
   6023 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6024 		trynum = 10; /* Due to hw errata */
   6025 	else
   6026 		trynum = 1;
   6027 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6028 		for (j = 0; j < trynum; j++)
   6029 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6030 
   6031 	/*
   6032 	 * Set up flow-control parameters.
   6033 	 *
   6034 	 * XXX Values could probably stand some tuning.
   6035 	 */
   6036 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6037 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6038 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6039 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6040 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6041 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6042 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6043 	}
   6044 
   6045 	sc->sc_fcrtl = FCRTL_DFLT;
   6046 	if (sc->sc_type < WM_T_82543) {
   6047 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6048 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6049 	} else {
   6050 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6051 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6052 	}
   6053 
   6054 	if (sc->sc_type == WM_T_80003)
   6055 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6056 	else
   6057 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6058 
   6059 	/* Writes the control register. */
   6060 	wm_set_vlan(sc);
   6061 
   6062 	if (sc->sc_flags & WM_F_HAS_MII) {
   6063 		uint16_t kmreg;
   6064 
   6065 		switch (sc->sc_type) {
   6066 		case WM_T_80003:
   6067 		case WM_T_ICH8:
   6068 		case WM_T_ICH9:
   6069 		case WM_T_ICH10:
   6070 		case WM_T_PCH:
   6071 		case WM_T_PCH2:
   6072 		case WM_T_PCH_LPT:
   6073 		case WM_T_PCH_SPT:
   6074 		case WM_T_PCH_CNP:
   6075 			/*
    6076 			 * Set the MAC to wait the maximum time between each
    6077 			 * iteration, and increase the max iterations when
    6078 			 * polling the PHY; this fixes erroneous timeouts at
   6079 			 * 10Mbps.
   6080 			 */
   6081 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6082 			    0xFFFF);
   6083 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6084 			    &kmreg);
   6085 			kmreg |= 0x3F;
   6086 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6087 			    kmreg);
   6088 			break;
   6089 		default:
   6090 			break;
   6091 		}
   6092 
   6093 		if (sc->sc_type == WM_T_80003) {
   6094 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6095 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6096 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6097 
    6098 			/* Bypass the RX and TX FIFOs */
   6099 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6100 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6101 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6102 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6103 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6104 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6105 		}
   6106 	}
   6107 #if 0
   6108 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6109 #endif
   6110 
   6111 	/* Set up checksum offload parameters. */
   6112 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6113 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6114 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6115 		reg |= RXCSUM_IPOFL;
   6116 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6117 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6118 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6119 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6120 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6121 
   6122 	/* Set registers about MSI-X */
   6123 	if (wm_is_using_msix(sc)) {
   6124 		uint32_t ivar, qintr_idx;
   6125 		struct wm_queue *wmq;
   6126 		unsigned int qid;
   6127 
   6128 		if (sc->sc_type == WM_T_82575) {
   6129 			/* Interrupt control */
   6130 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6131 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6132 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6133 
   6134 			/* TX and RX */
   6135 			for (i = 0; i < sc->sc_nqueues; i++) {
   6136 				wmq = &sc->sc_queue[i];
   6137 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6138 				    EITR_TX_QUEUE(wmq->wmq_id)
   6139 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6140 			}
   6141 			/* Link status */
   6142 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6143 			    EITR_OTHER);
   6144 		} else if (sc->sc_type == WM_T_82574) {
   6145 			/* Interrupt control */
   6146 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6147 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6149 
   6150 			/*
    6151 			 * Work around an issue with spurious interrupts
    6152 			 * in MSI-X mode.
    6153 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6154 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6155 			 */
   6156 			reg = CSR_READ(sc, WMREG_RFCTL);
   6157 			reg |= WMREG_RFCTL_ACKDIS;
   6158 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6159 
   6160 			ivar = 0;
   6161 			/* TX and RX */
   6162 			for (i = 0; i < sc->sc_nqueues; i++) {
   6163 				wmq = &sc->sc_queue[i];
   6164 				qid = wmq->wmq_id;
   6165 				qintr_idx = wmq->wmq_intr_idx;
   6166 
   6167 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6168 				    IVAR_TX_MASK_Q_82574(qid));
   6169 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6170 				    IVAR_RX_MASK_Q_82574(qid));
   6171 			}
   6172 			/* Link status */
   6173 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6174 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6175 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6176 		} else {
   6177 			/* Interrupt control */
   6178 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6179 			    | GPIE_EIAME | GPIE_PBA);
   6180 
   6181 			switch (sc->sc_type) {
   6182 			case WM_T_82580:
   6183 			case WM_T_I350:
   6184 			case WM_T_I354:
   6185 			case WM_T_I210:
   6186 			case WM_T_I211:
   6187 				/* TX and RX */
   6188 				for (i = 0; i < sc->sc_nqueues; i++) {
   6189 					wmq = &sc->sc_queue[i];
   6190 					qid = wmq->wmq_id;
   6191 					qintr_idx = wmq->wmq_intr_idx;
   6192 
   6193 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6194 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6195 					ivar |= __SHIFTIN((qintr_idx
   6196 						| IVAR_VALID),
   6197 					    IVAR_TX_MASK_Q(qid));
   6198 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6199 					ivar |= __SHIFTIN((qintr_idx
   6200 						| IVAR_VALID),
   6201 					    IVAR_RX_MASK_Q(qid));
   6202 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6203 				}
   6204 				break;
   6205 			case WM_T_82576:
   6206 				/* TX and RX */
   6207 				for (i = 0; i < sc->sc_nqueues; i++) {
   6208 					wmq = &sc->sc_queue[i];
   6209 					qid = wmq->wmq_id;
   6210 					qintr_idx = wmq->wmq_intr_idx;
   6211 
   6212 					ivar = CSR_READ(sc,
   6213 					    WMREG_IVAR_Q_82576(qid));
   6214 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6215 					ivar |= __SHIFTIN((qintr_idx
   6216 						| IVAR_VALID),
   6217 					    IVAR_TX_MASK_Q_82576(qid));
   6218 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6219 					ivar |= __SHIFTIN((qintr_idx
   6220 						| IVAR_VALID),
   6221 					    IVAR_RX_MASK_Q_82576(qid));
   6222 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6223 					    ivar);
   6224 				}
   6225 				break;
   6226 			default:
   6227 				break;
   6228 			}
   6229 
   6230 			/* Link status */
   6231 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6232 			    IVAR_MISC_OTHER);
   6233 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6234 		}
   6235 
   6236 		if (wm_is_using_multiqueue(sc)) {
   6237 			wm_init_rss(sc);
   6238 
    6239 			/*
    6240 			 * NOTE: Receive Full-Packet Checksum Offload is
    6241 			 * mutually exclusive with Multiqueue. However, it
    6242 			 * is distinct from the TCP/IP header checksums,
    6243 			 * which still work.
    6244 			 */
   6245 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6246 			reg |= RXCSUM_PCSD;
   6247 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6248 		}
   6249 	}
   6250 
    6251 	/* Set up the interrupt registers: mask everything via IMC first. */
    6252 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6253 
   6254 	/* Enable SFP module insertion interrupt if it's required */
   6255 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6256 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6257 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6258 		sfp_mask = ICR_GPI(0);
   6259 	}
   6260 
   6261 	if (wm_is_using_msix(sc)) {
   6262 		uint32_t mask;
   6263 		struct wm_queue *wmq;
   6264 
   6265 		switch (sc->sc_type) {
   6266 		case WM_T_82574:
   6267 			mask = 0;
   6268 			for (i = 0; i < sc->sc_nqueues; i++) {
   6269 				wmq = &sc->sc_queue[i];
   6270 				mask |= ICR_TXQ(wmq->wmq_id);
   6271 				mask |= ICR_RXQ(wmq->wmq_id);
   6272 			}
   6273 			mask |= ICR_OTHER;
   6274 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6275 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6276 			break;
   6277 		default:
   6278 			if (sc->sc_type == WM_T_82575) {
   6279 				mask = 0;
   6280 				for (i = 0; i < sc->sc_nqueues; i++) {
   6281 					wmq = &sc->sc_queue[i];
   6282 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6283 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6284 				}
   6285 				mask |= EITR_OTHER;
   6286 			} else {
   6287 				mask = 0;
   6288 				for (i = 0; i < sc->sc_nqueues; i++) {
   6289 					wmq = &sc->sc_queue[i];
   6290 					mask |= 1 << wmq->wmq_intr_idx;
   6291 				}
   6292 				mask |= 1 << sc->sc_link_intr_idx;
   6293 			}
   6294 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6295 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6296 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6297 
   6298 			/* For other interrupts */
   6299 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6300 			break;
   6301 		}
   6302 	} else {
   6303 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6304 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6305 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6306 	}
   6307 
   6308 	/* Set up the inter-packet gap. */
   6309 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6310 
   6311 	if (sc->sc_type >= WM_T_82543) {
   6312 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6313 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6314 			wm_itrs_writereg(sc, wmq);
   6315 		}
    6316 		/*
    6317 		 * Link interrupts occur much less frequently than TX
    6318 		 * and RX interrupts, so we don't tune the
    6319 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6320 		 * if_igb does.
    6321 		 */
   6322 	}
   6323 
    6324 	/* Set the VLAN EtherType. */
   6325 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6326 
    6327 	/*
    6328 	 * Set up the transmit control register; we start out with
    6329 	 * a collision distance suitable for FDX, but update it when
    6330 	 * we resolve the media type.
    6331 	 */
   6332 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6333 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6334 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6335 	if (sc->sc_type >= WM_T_82571)
   6336 		sc->sc_tctl |= TCTL_MULR;
   6337 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6338 
   6339 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6340 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6341 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6342 	}
   6343 
   6344 	if (sc->sc_type == WM_T_80003) {
   6345 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6346 		reg &= ~TCTL_EXT_GCEX_MASK;
   6347 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6348 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6349 	}
   6350 
   6351 	/* Set the media. */
   6352 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6353 		goto out;
   6354 
   6355 	/* Configure for OS presence */
   6356 	wm_init_manageability(sc);
   6357 
   6358 	/*
   6359 	 * Set up the receive control register; we actually program the
   6360 	 * register when we set the receive filter. Use multicast address
   6361 	 * offset type 0.
   6362 	 *
   6363 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6364 	 * don't enable that feature.
   6365 	 */
   6366 	sc->sc_mchash_type = 0;
   6367 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6368 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6369 
    6370 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6371 	if (sc->sc_type == WM_T_82574)
   6372 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6373 
   6374 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6375 		sc->sc_rctl |= RCTL_SECRC;
   6376 
   6377 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6378 	    && (ifp->if_mtu > ETHERMTU)) {
   6379 		sc->sc_rctl |= RCTL_LPE;
   6380 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6381 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6382 	}
   6383 
   6384 	if (MCLBYTES == 2048)
   6385 		sc->sc_rctl |= RCTL_2k;
   6386 	else {
   6387 		if (sc->sc_type >= WM_T_82543) {
   6388 			switch (MCLBYTES) {
   6389 			case 4096:
   6390 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6391 				break;
   6392 			case 8192:
   6393 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6394 				break;
   6395 			case 16384:
   6396 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6397 				break;
   6398 			default:
   6399 				panic("wm_init: MCLBYTES %d unsupported",
   6400 				    MCLBYTES);
   6401 				break;
   6402 			}
   6403 		} else
   6404 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6405 	}
   6406 
   6407 	/* Enable ECC */
   6408 	switch (sc->sc_type) {
   6409 	case WM_T_82571:
   6410 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6411 		reg |= PBA_ECC_CORR_EN;
   6412 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6413 		break;
   6414 	case WM_T_PCH_LPT:
   6415 	case WM_T_PCH_SPT:
   6416 	case WM_T_PCH_CNP:
   6417 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6418 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6419 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6420 
   6421 		sc->sc_ctrl |= CTRL_MEHE;
   6422 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6423 		break;
   6424 	default:
   6425 		break;
   6426 	}
   6427 
   6428 	/*
   6429 	 * Set the receive filter.
   6430 	 *
   6431 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6432 	 * the setting of RCTL.EN in wm_set_filter()
   6433 	 */
   6434 	wm_set_filter(sc);
   6435 
    6436 	/* On 82575 and later, set RDT only if RX is enabled. */
   6437 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6438 		int qidx;
   6439 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6440 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6441 			for (i = 0; i < WM_NRXDESC; i++) {
   6442 				mutex_enter(rxq->rxq_lock);
   6443 				wm_init_rxdesc(rxq, i);
   6444 				mutex_exit(rxq->rxq_lock);
   6445 
   6446 			}
   6447 		}
   6448 	}
   6449 
   6450 	wm_unset_stopping_flags(sc);
   6451 
   6452 	/* Start the one second link check clock. */
   6453 	callout_schedule(&sc->sc_tick_ch, hz);
   6454 
   6455 	/* ...all done! */
   6456 	ifp->if_flags |= IFF_RUNNING;
   6457 
   6458  out:
   6459 	/* Save last flags for the callback */
   6460 	sc->sc_if_flags = ifp->if_flags;
   6461 	sc->sc_ec_capenable = ec->ec_capenable;
   6462 	if (error)
   6463 		log(LOG_ERR, "%s: interface not running\n",
   6464 		    device_xname(sc->sc_dev));
   6465 	return error;
   6466 }
   6467 
   6468 /*
   6469  * wm_stop:		[ifnet interface function]
   6470  *
   6471  *	Stop transmission on the interface.
   6472  */
   6473 static void
   6474 wm_stop(struct ifnet *ifp, int disable)
   6475 {
   6476 	struct wm_softc *sc = ifp->if_softc;
   6477 
   6478 	ASSERT_SLEEPABLE();
   6479 
   6480 	WM_CORE_LOCK(sc);
    6481 	wm_stop_locked(ifp, disable != 0, true);
   6482 	WM_CORE_UNLOCK(sc);
   6483 
    6484 	/*
    6485 	 * After wm_set_stopping_flags(), it is guaranteed that
    6486 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6487 	 * However, workqueue_wait() cannot be called in
    6488 	 * wm_stop_locked() because it can sleep, so call
    6489 	 * workqueue_wait() here.
    6490 	 */
   6491 	for (int i = 0; i < sc->sc_nqueues; i++)
   6492 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6493 }
   6494 
   6495 static void
   6496 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6497 {
   6498 	struct wm_softc *sc = ifp->if_softc;
   6499 	struct wm_txsoft *txs;
   6500 	int i, qidx;
   6501 
   6502 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6503 		device_xname(sc->sc_dev), __func__));
   6504 	KASSERT(WM_CORE_LOCKED(sc));
   6505 
   6506 	wm_set_stopping_flags(sc);
   6507 
   6508 	if (sc->sc_flags & WM_F_HAS_MII) {
   6509 		/* Down the MII. */
   6510 		mii_down(&sc->sc_mii);
   6511 	} else {
   6512 #if 0
   6513 		/* Should we clear PHY's status properly? */
   6514 		wm_reset(sc);
   6515 #endif
   6516 	}
   6517 
   6518 	/* Stop the transmit and receive processes. */
   6519 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6520 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6521 	sc->sc_rctl &= ~RCTL_EN;
   6522 
   6523 	/*
   6524 	 * Clear the interrupt mask to ensure the device cannot assert its
   6525 	 * interrupt line.
   6526 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6527 	 * service any currently pending or shared interrupt.
   6528 	 */
   6529 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6530 	sc->sc_icr = 0;
   6531 	if (wm_is_using_msix(sc)) {
   6532 		if (sc->sc_type != WM_T_82574) {
   6533 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6534 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6535 		} else
   6536 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6537 	}
   6538 
   6539 	/*
   6540 	 * Stop callouts after interrupts are disabled; if we have
   6541 	 * to wait for them, we will be releasing the CORE_LOCK
   6542 	 * briefly, which will unblock interrupts on the current CPU.
   6543 	 */
   6544 
   6545 	/* Stop the one second clock. */
   6546 	if (wait)
   6547 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6548 	else
   6549 		callout_stop(&sc->sc_tick_ch);
   6550 
   6551 	/* Stop the 82547 Tx FIFO stall check timer. */
   6552 	if (sc->sc_type == WM_T_82547) {
   6553 		if (wait)
   6554 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6555 		else
   6556 			callout_stop(&sc->sc_txfifo_ch);
   6557 	}
   6558 
   6559 	/* Release any queued transmit buffers. */
   6560 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6561 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6562 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6563 		struct mbuf *m;
   6564 
   6565 		mutex_enter(txq->txq_lock);
   6566 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6567 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6568 			txs = &txq->txq_soft[i];
   6569 			if (txs->txs_mbuf != NULL) {
    6570 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6571 				m_freem(txs->txs_mbuf);
   6572 				txs->txs_mbuf = NULL;
   6573 			}
   6574 		}
   6575 		/* Drain txq_interq */
   6576 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6577 			m_freem(m);
   6578 		mutex_exit(txq->txq_lock);
   6579 	}
   6580 
   6581 	/* Mark the interface as down and cancel the watchdog timer. */
   6582 	ifp->if_flags &= ~IFF_RUNNING;
   6583 
   6584 	if (disable) {
   6585 		for (i = 0; i < sc->sc_nqueues; i++) {
   6586 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6587 			mutex_enter(rxq->rxq_lock);
   6588 			wm_rxdrain(rxq);
   6589 			mutex_exit(rxq->rxq_lock);
   6590 		}
   6591 	}
   6592 
   6593 #if 0 /* notyet */
   6594 	if (sc->sc_type >= WM_T_82544)
   6595 		CSR_WRITE(sc, WMREG_WUC, 0);
   6596 #endif
   6597 }
   6598 
   6599 static void
   6600 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6601 {
   6602 	struct mbuf *m;
   6603 	int i;
   6604 
   6605 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6606 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6607 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6608 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6609 		    m->m_data, m->m_len, m->m_flags);
   6610 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6611 	    i, i == 1 ? "" : "s");
   6612 }
   6613 
   6614 /*
   6615  * wm_82547_txfifo_stall:
   6616  *
   6617  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6618  *	reset the FIFO pointers, and restart packet transmission.
   6619  */
   6620 static void
   6621 wm_82547_txfifo_stall(void *arg)
   6622 {
   6623 	struct wm_softc *sc = arg;
   6624 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6625 
   6626 	mutex_enter(txq->txq_lock);
   6627 
   6628 	if (txq->txq_stopping)
   6629 		goto out;
   6630 
   6631 	if (txq->txq_fifo_stall) {
   6632 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6633 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6634 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6635 			/*
   6636 			 * Packets have drained.  Stop transmitter, reset
   6637 			 * FIFO pointers, restart transmitter, and kick
   6638 			 * the packet queue.
   6639 			 */
   6640 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6641 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6642 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6643 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6644 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6645 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6646 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6647 			CSR_WRITE_FLUSH(sc);
   6648 
   6649 			txq->txq_fifo_head = 0;
   6650 			txq->txq_fifo_stall = 0;
   6651 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6652 		} else {
   6653 			/*
   6654 			 * Still waiting for packets to drain; try again in
   6655 			 * another tick.
   6656 			 */
   6657 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6658 		}
   6659 	}
   6660 
   6661 out:
   6662 	mutex_exit(txq->txq_lock);
   6663 }
   6664 
   6665 /*
   6666  * wm_82547_txfifo_bugchk:
   6667  *
   6668  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6669  *	prevent enqueueing a packet that would wrap around the end
    6670  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6671  *
   6672  *	We do this by checking the amount of space before the end
   6673  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6674  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6675  *	the internal FIFO pointers to the beginning, and restart
   6676  *	transmission on the interface.
   6677  */
   6678 #define	WM_FIFO_HDR		0x10
   6679 #define	WM_82547_PAD_LEN	0x3e0
   6680 static int
   6681 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6682 {
   6683 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6684 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6685 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
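         	/*
         	 * Example: a 1514-byte frame consumes
         	 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space.
         	 */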
   6686 
   6687 	/* Just return if already stalled. */
   6688 	if (txq->txq_fifo_stall)
   6689 		return 1;
   6690 
   6691 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6692 		/* Stall only occurs in half-duplex mode. */
   6693 		goto send_packet;
   6694 	}
   6695 
   6696 	if (len >= WM_82547_PAD_LEN + space) {
   6697 		txq->txq_fifo_stall = 1;
   6698 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6699 		return 1;
   6700 	}
   6701 
   6702  send_packet:
   6703 	txq->txq_fifo_head += len;
   6704 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6705 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6706 
   6707 	return 0;
   6708 }
   6709 
   6710 static int
   6711 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6712 {
   6713 	int error;
   6714 
   6715 	/*
   6716 	 * Allocate the control data structures, and create and load the
   6717 	 * DMA map for it.
   6718 	 *
   6719 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6720 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6721 	 * both sets within the same 4G segment.
   6722 	 */
   6723 	if (sc->sc_type < WM_T_82544)
   6724 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6725 	else
   6726 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6727 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6728 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6729 	else
   6730 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6731 
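         	/*
         	 * The boundary argument of 0x100000000ULL below keeps the
         	 * allocation from crossing a 4G boundary, satisfying the
         	 * note above.
         	 */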
   6732 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6733 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6734 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6735 		aprint_error_dev(sc->sc_dev,
   6736 		    "unable to allocate TX control data, error = %d\n",
   6737 		    error);
   6738 		goto fail_0;
   6739 	}
   6740 
   6741 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6742 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6743 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6744 		aprint_error_dev(sc->sc_dev,
   6745 		    "unable to map TX control data, error = %d\n", error);
   6746 		goto fail_1;
   6747 	}
   6748 
   6749 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6750 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6751 		aprint_error_dev(sc->sc_dev,
   6752 		    "unable to create TX control data DMA map, error = %d\n",
   6753 		    error);
   6754 		goto fail_2;
   6755 	}
   6756 
   6757 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6758 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6759 		aprint_error_dev(sc->sc_dev,
   6760 		    "unable to load TX control data DMA map, error = %d\n",
   6761 		    error);
   6762 		goto fail_3;
   6763 	}
   6764 
   6765 	return 0;
   6766 
   6767  fail_3:
   6768 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6769  fail_2:
   6770 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6771 	    WM_TXDESCS_SIZE(txq));
   6772  fail_1:
   6773 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6774  fail_0:
   6775 	return error;
   6776 }
   6777 
   6778 static void
   6779 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6780 {
   6781 
   6782 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6783 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6784 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6785 	    WM_TXDESCS_SIZE(txq));
   6786 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6787 }
   6788 
   6789 static int
   6790 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6791 {
   6792 	int error;
   6793 	size_t rxq_descs_size;
   6794 
   6795 	/*
   6796 	 * Allocate the control data structures, and create and load the
   6797 	 * DMA map for it.
   6798 	 *
   6799 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6800 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6801 	 * both sets within the same 4G segment.
   6802 	 */
   6803 	rxq->rxq_ndesc = WM_NRXDESC;
   6804 	if (sc->sc_type == WM_T_82574)
   6805 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6806 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6807 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6808 	else
   6809 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6810 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6811 
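         	/*
         	 * As for the Tx descriptors, the 4G boundary argument below
         	 * keeps the Rx descriptors within a single 4G segment.
         	 */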
   6812 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6813 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6814 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6815 		aprint_error_dev(sc->sc_dev,
   6816 		    "unable to allocate RX control data, error = %d\n",
   6817 		    error);
   6818 		goto fail_0;
   6819 	}
   6820 
   6821 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6822 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6823 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6824 		aprint_error_dev(sc->sc_dev,
   6825 		    "unable to map RX control data, error = %d\n", error);
   6826 		goto fail_1;
   6827 	}
   6828 
   6829 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6830 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6831 		aprint_error_dev(sc->sc_dev,
   6832 		    "unable to create RX control data DMA map, error = %d\n",
   6833 		    error);
   6834 		goto fail_2;
   6835 	}
   6836 
   6837 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6838 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6839 		aprint_error_dev(sc->sc_dev,
   6840 		    "unable to load RX control data DMA map, error = %d\n",
   6841 		    error);
   6842 		goto fail_3;
   6843 	}
   6844 
   6845 	return 0;
   6846 
   6847  fail_3:
   6848 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6849  fail_2:
   6850 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6851 	    rxq_descs_size);
   6852  fail_1:
   6853 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6854  fail_0:
   6855 	return error;
   6856 }
   6857 
   6858 static void
   6859 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6860 {
   6861 
   6862 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6863 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6864 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6865 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6866 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6867 }
   6868 
   6869 
   6870 static int
   6871 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6872 {
   6873 	int i, error;
   6874 
   6875 	/* Create the transmit buffer DMA maps. */
   6876 	WM_TXQUEUELEN(txq) =
   6877 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6878 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6879 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6880 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6881 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6882 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6883 			aprint_error_dev(sc->sc_dev,
   6884 			    "unable to create Tx DMA map %d, error = %d\n",
   6885 			    i, error);
   6886 			goto fail;
   6887 		}
   6888 	}
   6889 
   6890 	return 0;
   6891 
   6892  fail:
   6893 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6894 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6895 			bus_dmamap_destroy(sc->sc_dmat,
   6896 			    txq->txq_soft[i].txs_dmamap);
   6897 	}
   6898 	return error;
   6899 }
   6900 
   6901 static void
   6902 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6903 {
   6904 	int i;
   6905 
   6906 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6907 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6908 			bus_dmamap_destroy(sc->sc_dmat,
   6909 			    txq->txq_soft[i].txs_dmamap);
   6910 	}
   6911 }
   6912 
   6913 static int
   6914 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6915 {
   6916 	int i, error;
   6917 
   6918 	/* Create the receive buffer DMA maps. */
   6919 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6920 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6921 			    MCLBYTES, 0, 0,
   6922 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6923 			aprint_error_dev(sc->sc_dev,
    6924 			    "unable to create Rx DMA map %d, error = %d\n",
   6925 			    i, error);
   6926 			goto fail;
   6927 		}
   6928 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6929 	}
   6930 
   6931 	return 0;
   6932 
   6933  fail:
   6934 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6935 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6936 			bus_dmamap_destroy(sc->sc_dmat,
   6937 			    rxq->rxq_soft[i].rxs_dmamap);
   6938 	}
   6939 	return error;
   6940 }
   6941 
   6942 static void
   6943 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6944 {
   6945 	int i;
   6946 
   6947 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6948 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6949 			bus_dmamap_destroy(sc->sc_dmat,
   6950 			    rxq->rxq_soft[i].rxs_dmamap);
   6951 	}
   6952 }
   6953 
   6954 /*
    6955  * wm_alloc_txrx_queues:
    6956  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6957  */
   6958 static int
   6959 wm_alloc_txrx_queues(struct wm_softc *sc)
   6960 {
   6961 	int i, error, tx_done, rx_done;
   6962 
   6963 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6964 	    KM_SLEEP);
   6965 	if (sc->sc_queue == NULL) {
    6966 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6967 		error = ENOMEM;
   6968 		goto fail_0;
   6969 	}
   6970 
   6971 	/* For transmission */
   6972 	error = 0;
   6973 	tx_done = 0;
   6974 	for (i = 0; i < sc->sc_nqueues; i++) {
   6975 #ifdef WM_EVENT_COUNTERS
   6976 		int j;
   6977 		const char *xname;
   6978 #endif
   6979 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6980 		txq->txq_sc = sc;
   6981 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6982 
   6983 		error = wm_alloc_tx_descs(sc, txq);
   6984 		if (error)
   6985 			break;
   6986 		error = wm_alloc_tx_buffer(sc, txq);
   6987 		if (error) {
   6988 			wm_free_tx_descs(sc, txq);
   6989 			break;
   6990 		}
   6991 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6992 		if (txq->txq_interq == NULL) {
   6993 			wm_free_tx_descs(sc, txq);
   6994 			wm_free_tx_buffer(sc, txq);
   6995 			error = ENOMEM;
   6996 			break;
   6997 		}
   6998 
   6999 #ifdef WM_EVENT_COUNTERS
   7000 		xname = device_xname(sc->sc_dev);
   7001 
   7002 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7003 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7004 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7005 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7006 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7007 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7008 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7009 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7010 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7011 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7012 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7013 
   7014 		for (j = 0; j < WM_NTXSEGS; j++) {
   7015 			snprintf(txq->txq_txseg_evcnt_names[j],
    7016 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    7017 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    7018 			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   7019 		}
   7020 
   7021 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7022 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7023 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7024 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7025 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7026 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7027 #endif /* WM_EVENT_COUNTERS */
   7028 
   7029 		tx_done++;
   7030 	}
   7031 	if (error)
   7032 		goto fail_1;
   7033 
   7034 	/* For receive */
   7035 	error = 0;
   7036 	rx_done = 0;
   7037 	for (i = 0; i < sc->sc_nqueues; i++) {
   7038 #ifdef WM_EVENT_COUNTERS
   7039 		const char *xname;
   7040 #endif
   7041 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7042 		rxq->rxq_sc = sc;
   7043 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7044 
   7045 		error = wm_alloc_rx_descs(sc, rxq);
   7046 		if (error)
   7047 			break;
   7048 
   7049 		error = wm_alloc_rx_buffer(sc, rxq);
   7050 		if (error) {
   7051 			wm_free_rx_descs(sc, rxq);
   7052 			break;
   7053 		}
   7054 
   7055 #ifdef WM_EVENT_COUNTERS
   7056 		xname = device_xname(sc->sc_dev);
   7057 
   7058 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7059 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7060 
   7061 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7062 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7063 #endif /* WM_EVENT_COUNTERS */
   7064 
   7065 		rx_done++;
   7066 	}
   7067 	if (error)
   7068 		goto fail_2;
   7069 
   7070 	return 0;
   7071 
   7072  fail_2:
   7073 	for (i = 0; i < rx_done; i++) {
   7074 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7075 		wm_free_rx_buffer(sc, rxq);
   7076 		wm_free_rx_descs(sc, rxq);
   7077 		if (rxq->rxq_lock)
   7078 			mutex_obj_free(rxq->rxq_lock);
   7079 	}
   7080  fail_1:
   7081 	for (i = 0; i < tx_done; i++) {
   7082 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7083 		pcq_destroy(txq->txq_interq);
   7084 		wm_free_tx_buffer(sc, txq);
   7085 		wm_free_tx_descs(sc, txq);
   7086 		if (txq->txq_lock)
   7087 			mutex_obj_free(txq->txq_lock);
   7088 	}
   7089 
   7090 	kmem_free(sc->sc_queue,
   7091 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7092  fail_0:
   7093 	return error;
   7094 }
   7095 
   7096 /*
    7097  * wm_free_txrx_queues:
    7098  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7099  */
   7100 static void
   7101 wm_free_txrx_queues(struct wm_softc *sc)
   7102 {
   7103 	int i;
   7104 
   7105 	for (i = 0; i < sc->sc_nqueues; i++) {
   7106 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7107 
   7108 #ifdef WM_EVENT_COUNTERS
   7109 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7110 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7111 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7112 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7113 #endif /* WM_EVENT_COUNTERS */
   7114 
   7115 		wm_free_rx_buffer(sc, rxq);
   7116 		wm_free_rx_descs(sc, rxq);
   7117 		if (rxq->rxq_lock)
   7118 			mutex_obj_free(rxq->rxq_lock);
   7119 	}
   7120 
   7121 	for (i = 0; i < sc->sc_nqueues; i++) {
   7122 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7123 		struct mbuf *m;
   7124 #ifdef WM_EVENT_COUNTERS
   7125 		int j;
   7126 
   7127 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7128 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7129 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7130 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7131 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7132 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7133 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7134 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7135 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7136 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7137 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7138 
   7139 		for (j = 0; j < WM_NTXSEGS; j++)
   7140 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7141 
   7142 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7143 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7144 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7145 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7146 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7147 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7148 #endif /* WM_EVENT_COUNTERS */
   7149 
   7150 		/* Drain txq_interq */
   7151 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7152 			m_freem(m);
   7153 		pcq_destroy(txq->txq_interq);
   7154 
   7155 		wm_free_tx_buffer(sc, txq);
   7156 		wm_free_tx_descs(sc, txq);
   7157 		if (txq->txq_lock)
   7158 			mutex_obj_free(txq->txq_lock);
   7159 	}
   7160 
   7161 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7162 }
   7163 
   7164 static void
   7165 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7166 {
   7167 
   7168 	KASSERT(mutex_owned(txq->txq_lock));
   7169 
   7170 	/* Initialize the transmit descriptor ring. */
   7171 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7172 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7173 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7174 	txq->txq_free = WM_NTXDESC(txq);
   7175 	txq->txq_next = 0;
   7176 }
   7177 
   7178 static void
   7179 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7180     struct wm_txqueue *txq)
   7181 {
   7182 
   7183 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7184 		device_xname(sc->sc_dev), __func__));
   7185 	KASSERT(mutex_owned(txq->txq_lock));
   7186 
   7187 	if (sc->sc_type < WM_T_82543) {
   7188 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7189 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7190 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7191 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7192 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7193 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7194 	} else {
   7195 		int qid = wmq->wmq_id;
   7196 
   7197 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7198 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7199 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7200 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7201 
   7202 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7203 			/*
   7204 			 * Don't write TDT before TCTL.EN is set.
    7205 			 * See the documentation.
   7206 			 */
   7207 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7208 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7209 			    | TXDCTL_WTHRESH(0));
   7210 		else {
   7211 			/* XXX should update with AIM? */
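         			/*
         			 * wmq_itr appears to be kept in ITR/EITR units
         			 * (256 ns ticks) while TIDV/TADV count 1.024 us
         			 * ticks, hence the division by 4.
         			 */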
   7212 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7213 			if (sc->sc_type >= WM_T_82540) {
    7214 				/* TADV should hold the same value as TIDV. */
   7215 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7216 			}
   7217 
   7218 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7219 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7220 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7221 		}
   7222 	}
   7223 }
   7224 
   7225 static void
   7226 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7227 {
   7228 	int i;
   7229 
   7230 	KASSERT(mutex_owned(txq->txq_lock));
   7231 
   7232 	/* Initialize the transmit job descriptors. */
   7233 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7234 		txq->txq_soft[i].txs_mbuf = NULL;
   7235 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7236 	txq->txq_snext = 0;
   7237 	txq->txq_sdirty = 0;
   7238 }
   7239 
   7240 static void
   7241 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7242     struct wm_txqueue *txq)
   7243 {
   7244 
   7245 	KASSERT(mutex_owned(txq->txq_lock));
   7246 
   7247 	/*
   7248 	 * Set up some register offsets that are different between
   7249 	 * the i82542 and the i82543 and later chips.
   7250 	 */
   7251 	if (sc->sc_type < WM_T_82543)
   7252 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7253 	else
   7254 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7255 
   7256 	wm_init_tx_descs(sc, txq);
   7257 	wm_init_tx_regs(sc, wmq, txq);
   7258 	wm_init_tx_buffer(sc, txq);
   7259 
   7260 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7261 	txq->txq_sending = false;
   7262 }
   7263 
   7264 static void
   7265 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7266     struct wm_rxqueue *rxq)
   7267 {
   7268 
   7269 	KASSERT(mutex_owned(rxq->rxq_lock));
   7270 
   7271 	/*
   7272 	 * Initialize the receive descriptor and receive job
   7273 	 * descriptor rings.
   7274 	 */
   7275 	if (sc->sc_type < WM_T_82543) {
   7276 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7277 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7278 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7279 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7280 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7281 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7282 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7283 
   7284 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7285 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7286 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7287 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7288 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7289 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7290 	} else {
   7291 		int qid = wmq->wmq_id;
   7292 
   7293 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7294 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7295 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7296 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7297 
   7298 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7299 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7300 				panic("%s: MCLBYTES %d unsupported for 82575 "
         				    "or higher\n", __func__, MCLBYTES);
   7301 
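         			/*
         			 * BSIZEPKT is expressed in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes; e.g.
         			 * 2048-byte clusters program a value of 2 when
         			 * the unit is 1 KB.
         			 */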
    7302 			/*
         			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
         			 * supported.
         			 */
    7303 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
    7304 			    SRRCTL_DESCTYPE_ADV_ONEBUF
         			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7305 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7306 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7307 			    | RXDCTL_WTHRESH(1));
   7308 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7309 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7310 		} else {
   7311 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7312 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7313 			/* XXX should update with AIM? */
   7314 			CSR_WRITE(sc, WMREG_RDTR,
   7315 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7316 			/* MUST be the same value as RDTR. */
   7317 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7318 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7319 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7320 		}
   7321 	}
   7322 }
   7323 
   7324 static int
   7325 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7326 {
   7327 	struct wm_rxsoft *rxs;
   7328 	int error, i;
   7329 
   7330 	KASSERT(mutex_owned(rxq->rxq_lock));
   7331 
   7332 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7333 		rxs = &rxq->rxq_soft[i];
   7334 		if (rxs->rxs_mbuf == NULL) {
   7335 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7336 				log(LOG_ERR, "%s: unable to allocate or map "
   7337 				    "rx buffer %d, error = %d\n",
   7338 				    device_xname(sc->sc_dev), i, error);
   7339 				/*
   7340 				 * XXX Should attempt to run with fewer receive
   7341 				 * XXX buffers instead of just failing.
   7342 				 */
   7343 				wm_rxdrain(rxq);
   7344 				return ENOMEM;
   7345 			}
   7346 		} else {
   7347 			/*
   7348 			 * For 82575 and 82576, the RX descriptors must be
   7349 			 * initialized after the setting of RCTL.EN in
   7350 			 * wm_set_filter()
   7351 			 */
   7352 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7353 				wm_init_rxdesc(rxq, i);
   7354 		}
   7355 	}
   7356 	rxq->rxq_ptr = 0;
   7357 	rxq->rxq_discard = 0;
   7358 	WM_RXCHAIN_RESET(rxq);
   7359 
   7360 	return 0;
   7361 }
   7362 
   7363 static int
   7364 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7365     struct wm_rxqueue *rxq)
   7366 {
   7367 
   7368 	KASSERT(mutex_owned(rxq->rxq_lock));
   7369 
   7370 	/*
   7371 	 * Set up some register offsets that are different between
   7372 	 * the i82542 and the i82543 and later chips.
   7373 	 */
   7374 	if (sc->sc_type < WM_T_82543)
   7375 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7376 	else
   7377 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7378 
   7379 	wm_init_rx_regs(sc, wmq, rxq);
   7380 	return wm_init_rx_buffer(sc, rxq);
   7381 }
   7382 
   7383 /*
    7384  * wm_init_txrx_queues:
    7385  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7386  */
   7387 static int
   7388 wm_init_txrx_queues(struct wm_softc *sc)
   7389 {
   7390 	int i, error = 0;
   7391 
   7392 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7393 		device_xname(sc->sc_dev), __func__));
   7394 
   7395 	for (i = 0; i < sc->sc_nqueues; i++) {
   7396 		struct wm_queue *wmq = &sc->sc_queue[i];
   7397 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7398 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7399 
    7400 		/*
    7401 		 * TODO
    7402 		 * Currently, we use a constant value instead of AIM.
    7403 		 * Furthermore, the interrupt interval of multiqueue, which
    7404 		 * uses polling mode, is lower than the default value.
    7405 		 * More tuning and AIM support are required.
    7406 		 */
   7407 		if (wm_is_using_multiqueue(sc))
   7408 			wmq->wmq_itr = 50;
   7409 		else
   7410 			wmq->wmq_itr = sc->sc_itr_init;
   7411 		wmq->wmq_set_itr = true;
   7412 
   7413 		mutex_enter(txq->txq_lock);
   7414 		wm_init_tx_queue(sc, wmq, txq);
   7415 		mutex_exit(txq->txq_lock);
   7416 
   7417 		mutex_enter(rxq->rxq_lock);
   7418 		error = wm_init_rx_queue(sc, wmq, rxq);
   7419 		mutex_exit(rxq->rxq_lock);
   7420 		if (error)
   7421 			break;
   7422 	}
   7423 
   7424 	return error;
   7425 }
   7426 
   7427 /*
   7428  * wm_tx_offload:
   7429  *
   7430  *	Set up TCP/IP checksumming parameters for the
   7431  *	specified packet.
   7432  */
   7433 static void
   7434 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7435     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7436 {
   7437 	struct mbuf *m0 = txs->txs_mbuf;
   7438 	struct livengood_tcpip_ctxdesc *t;
   7439 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7440 	uint32_t ipcse;
   7441 	struct ether_header *eh;
   7442 	int offset, iphl;
   7443 	uint8_t fields;
   7444 
   7445 	/*
   7446 	 * XXX It would be nice if the mbuf pkthdr had offset
   7447 	 * fields for the protocol headers.
   7448 	 */
   7449 
   7450 	eh = mtod(m0, struct ether_header *);
   7451 	switch (htons(eh->ether_type)) {
   7452 	case ETHERTYPE_IP:
   7453 	case ETHERTYPE_IPV6:
   7454 		offset = ETHER_HDR_LEN;
   7455 		break;
   7456 
   7457 	case ETHERTYPE_VLAN:
   7458 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7459 		break;
   7460 
   7461 	default:
   7462 		/* Don't support this protocol or encapsulation. */
    7463 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7464 		txq->txq_last_hw_ipcs = 0;
    7465 		txq->txq_last_hw_tucs = 0;
   7466 		*fieldsp = 0;
   7467 		*cmdp = 0;
   7468 		return;
   7469 	}
   7470 
   7471 	if ((m0->m_pkthdr.csum_flags &
   7472 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7473 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7474 	} else
   7475 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7476 
   7477 	ipcse = offset + iphl - 1;
   7478 
   7479 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7480 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7481 	seg = 0;
   7482 	fields = 0;
   7483 
   7484 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7485 		int hlen = offset + iphl;
   7486 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
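         		/*
         		 * Example: for a plain IPv4/TCP frame, offset = 14 and
         		 * iphl = 20, so hlen starts at 34 and grows below by
         		 * the TCP header length (th_off << 2, typically 20).
         		 */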
   7487 
   7488 		if (__predict_false(m0->m_len <
   7489 				    (hlen + sizeof(struct tcphdr)))) {
   7490 			/*
   7491 			 * TCP/IP headers are not in the first mbuf; we need
   7492 			 * to do this the slow and painful way. Let's just
   7493 			 * hope this doesn't happen very often.
   7494 			 */
   7495 			struct tcphdr th;
   7496 
   7497 			WM_Q_EVCNT_INCR(txq, tsopain);
   7498 
   7499 			m_copydata(m0, hlen, sizeof(th), &th);
   7500 			if (v4) {
   7501 				struct ip ip;
   7502 
   7503 				m_copydata(m0, offset, sizeof(ip), &ip);
   7504 				ip.ip_len = 0;
   7505 				m_copyback(m0,
   7506 				    offset + offsetof(struct ip, ip_len),
   7507 				    sizeof(ip.ip_len), &ip.ip_len);
   7508 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7509 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7510 			} else {
   7511 				struct ip6_hdr ip6;
   7512 
   7513 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7514 				ip6.ip6_plen = 0;
   7515 				m_copyback(m0,
   7516 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7517 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7518 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7519 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7520 			}
   7521 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7522 			    sizeof(th.th_sum), &th.th_sum);
   7523 
   7524 			hlen += th.th_off << 2;
   7525 		} else {
   7526 			/*
   7527 			 * TCP/IP headers are in the first mbuf; we can do
   7528 			 * this the easy way.
   7529 			 */
   7530 			struct tcphdr *th;
   7531 
   7532 			if (v4) {
   7533 				struct ip *ip =
   7534 				    (void *)(mtod(m0, char *) + offset);
   7535 				th = (void *)(mtod(m0, char *) + hlen);
   7536 
   7537 				ip->ip_len = 0;
   7538 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7539 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7540 			} else {
   7541 				struct ip6_hdr *ip6 =
   7542 				    (void *)(mtod(m0, char *) + offset);
   7543 				th = (void *)(mtod(m0, char *) + hlen);
   7544 
   7545 				ip6->ip6_plen = 0;
   7546 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7547 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7548 			}
   7549 			hlen += th->th_off << 2;
   7550 		}
   7551 
   7552 		if (v4) {
   7553 			WM_Q_EVCNT_INCR(txq, tso);
   7554 			cmdlen |= WTX_TCPIP_CMD_IP;
   7555 		} else {
   7556 			WM_Q_EVCNT_INCR(txq, tso6);
   7557 			ipcse = 0;
   7558 		}
   7559 		cmd |= WTX_TCPIP_CMD_TSE;
   7560 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7561 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7562 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7563 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7564 	}
   7565 
   7566 	/*
   7567 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7568 	 * offload feature, if we load the context descriptor, we
   7569 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7570 	 */
   7571 
   7572 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7573 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7574 	    WTX_TCPIP_IPCSE(ipcse);
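         	/*
         	 * Example: for an untagged frame with a minimal IPv4 header,
         	 * IPCSS = 14 (start of the IP header), IPCSO = 14 +
         	 * offsetof(struct ip, ip_sum) = 24, and IPCSE = 14 + 20 - 1
         	 * = 33, the last byte covered by the IP header checksum.
         	 */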
   7575 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7576 		WM_Q_EVCNT_INCR(txq, ipsum);
   7577 		fields |= WTX_IXSM;
   7578 	}
   7579 
   7580 	offset += iphl;
   7581 
   7582 	if (m0->m_pkthdr.csum_flags &
   7583 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7584 		WM_Q_EVCNT_INCR(txq, tusum);
   7585 		fields |= WTX_TXSM;
   7586 		tucs = WTX_TCPIP_TUCSS(offset) |
   7587 		    WTX_TCPIP_TUCSO(offset +
   7588 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7589 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7590 	} else if ((m0->m_pkthdr.csum_flags &
   7591 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7592 		WM_Q_EVCNT_INCR(txq, tusum6);
   7593 		fields |= WTX_TXSM;
   7594 		tucs = WTX_TCPIP_TUCSS(offset) |
   7595 		    WTX_TCPIP_TUCSO(offset +
   7596 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7597 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7598 	} else {
   7599 		/* Just initialize it to a valid TCP context. */
   7600 		tucs = WTX_TCPIP_TUCSS(offset) |
   7601 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7602 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7603 	}
   7604 
   7605 	*cmdp = cmd;
   7606 	*fieldsp = fields;
   7607 
    7608 	/*
    7609 	 * We don't have to write a context descriptor for every packet,
    7610 	 * except on the 82574. For the 82574, we must write a context
    7611 	 * descriptor for every packet when we use two descriptor queues.
    7612 	 *
    7613 	 * The 82574L can only remember the *last* context used,
    7614 	 * regardless of the queue it was used for.  We cannot reuse
    7615 	 * contexts on this hardware platform and must generate a new
    7616 	 * context every time.  82574L hardware spec, section 7.2.6,
    7617 	 * second note.
    7618 	 */
   7619 	if (sc->sc_nqueues < 2) {
    7620 		/*
    7621 		 * Setting up a new checksum offload context for every
    7622 		 * frame takes a lot of processing time for hardware.
    7623 		 * This also reduces performance a lot for small sized
    7624 		 * frames, so avoid it if the driver can use a previously
    7625 		 * configured checksum offload context.
    7626 		 * For TSO, in theory we could use the same TSO context
    7627 		 * only if the frame is the same type (IP/TCP) and has
    7628 		 * the same MSS. However, checking whether a frame has
    7629 		 * the same IP/TCP structure is hard, so just ignore
    7630 		 * that and always re-establish a new TSO context.
    7631 		 */
   7633 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7634 		    == 0) {
   7635 			if (txq->txq_last_hw_cmd == cmd &&
   7636 			    txq->txq_last_hw_fields == fields &&
   7637 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7638 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7639 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7640 				return;
   7641 			}
   7642 		}
   7643 
    7644 		txq->txq_last_hw_cmd = cmd;
    7645 		txq->txq_last_hw_fields = fields;
    7646 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
    7647 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7648 	}
   7649 
   7650 	/* Fill in the context descriptor. */
   7651 	t = (struct livengood_tcpip_ctxdesc *)
   7652 	    &txq->txq_descs[txq->txq_next];
   7653 	t->tcpip_ipcs = htole32(ipcs);
   7654 	t->tcpip_tucs = htole32(tucs);
   7655 	t->tcpip_cmdlen = htole32(cmdlen);
   7656 	t->tcpip_seg = htole32(seg);
   7657 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7658 
   7659 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7660 	txs->txs_ndesc++;
   7661 }
   7662 
   7663 static inline int
   7664 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7665 {
   7666 	struct wm_softc *sc = ifp->if_softc;
   7667 	u_int cpuid = cpu_index(curcpu());
   7668 
   7669 	/*
    7670 	 * Currently, a simple distribution strategy.
    7671 	 * TODO:
    7672 	 * Distribute by flowid (RSS hash value).
   7673 	 */
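         	/*
         	 * Example: with ncpu = 8, sc_affinity_offset = 2 and four
         	 * queues, CPU 1 maps to ((1 + 8 - 2) % 8) % 4 = 3.
         	 */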
   7674 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7675 }
   7676 
   7677 /*
   7678  * wm_start:		[ifnet interface function]
   7679  *
   7680  *	Start packet transmission on the interface.
   7681  */
   7682 static void
   7683 wm_start(struct ifnet *ifp)
   7684 {
   7685 	struct wm_softc *sc = ifp->if_softc;
   7686 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7687 
   7688 #ifdef WM_MPSAFE
   7689 	KASSERT(if_is_mpsafe(ifp));
   7690 #endif
   7691 	/*
   7692 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7693 	 */
   7694 
   7695 	mutex_enter(txq->txq_lock);
   7696 	if (!txq->txq_stopping)
   7697 		wm_start_locked(ifp);
   7698 	mutex_exit(txq->txq_lock);
   7699 }
   7700 
   7701 static void
   7702 wm_start_locked(struct ifnet *ifp)
   7703 {
   7704 	struct wm_softc *sc = ifp->if_softc;
   7705 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7706 
   7707 	wm_send_common_locked(ifp, txq, false);
   7708 }
   7709 
   7710 static int
   7711 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7712 {
   7713 	int qid;
   7714 	struct wm_softc *sc = ifp->if_softc;
   7715 	struct wm_txqueue *txq;
   7716 
   7717 	qid = wm_select_txqueue(ifp, m);
   7718 	txq = &sc->sc_queue[qid].wmq_txq;
   7719 
   7720 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7721 		m_freem(m);
   7722 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7723 		return ENOBUFS;
   7724 	}
   7725 
   7726 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7727 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7728 	if (m->m_flags & M_MCAST)
   7729 		if_statinc_ref(nsr, if_omcasts);
   7730 	IF_STAT_PUTREF(ifp);
   7731 
   7732 	if (mutex_tryenter(txq->txq_lock)) {
   7733 		if (!txq->txq_stopping)
   7734 			wm_transmit_locked(ifp, txq);
   7735 		mutex_exit(txq->txq_lock);
   7736 	}
   7737 
   7738 	return 0;
   7739 }
   7740 
   7741 static void
   7742 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7743 {
   7744 
   7745 	wm_send_common_locked(ifp, txq, true);
   7746 }
   7747 
   7748 static void
   7749 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7750     bool is_transmit)
   7751 {
   7752 	struct wm_softc *sc = ifp->if_softc;
   7753 	struct mbuf *m0;
   7754 	struct wm_txsoft *txs;
   7755 	bus_dmamap_t dmamap;
   7756 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7757 	bus_addr_t curaddr;
   7758 	bus_size_t seglen, curlen;
   7759 	uint32_t cksumcmd;
   7760 	uint8_t cksumfields;
   7761 	bool remap = true;
   7762 
   7763 	KASSERT(mutex_owned(txq->txq_lock));
   7764 
   7765 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7766 		return;
   7767 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7768 		return;
   7769 
   7770 	/* Remember the previous number of free descriptors. */
   7771 	ofree = txq->txq_free;
   7772 
   7773 	/*
   7774 	 * Loop through the send queue, setting up transmit descriptors
   7775 	 * until we drain the queue, or use up all available transmit
   7776 	 * descriptors.
   7777 	 */
   7778 	for (;;) {
   7779 		m0 = NULL;
   7780 
   7781 		/* Get a work queue entry. */
   7782 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7783 			wm_txeof(txq, UINT_MAX);
   7784 			if (txq->txq_sfree == 0) {
   7785 				DPRINTF(sc, WM_DEBUG_TX,
   7786 				    ("%s: TX: no free job descriptors\n",
   7787 					device_xname(sc->sc_dev)));
   7788 				WM_Q_EVCNT_INCR(txq, txsstall);
   7789 				break;
   7790 			}
   7791 		}
   7792 
   7793 		/* Grab a packet off the queue. */
   7794 		if (is_transmit)
   7795 			m0 = pcq_get(txq->txq_interq);
   7796 		else
   7797 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7798 		if (m0 == NULL)
   7799 			break;
   7800 
   7801 		DPRINTF(sc, WM_DEBUG_TX,
   7802 		    ("%s: TX: have packet to transmit: %p\n",
   7803 			device_xname(sc->sc_dev), m0));
   7804 
   7805 		txs = &txq->txq_soft[txq->txq_snext];
   7806 		dmamap = txs->txs_dmamap;
   7807 
   7808 		use_tso = (m0->m_pkthdr.csum_flags &
   7809 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7810 
   7811 		/*
   7812 		 * So says the Linux driver:
   7813 		 * The controller does a simple calculation to make sure
   7814 		 * there is enough room in the FIFO before initiating the
   7815 		 * DMA for each buffer. The calc is:
   7816 		 *	4 = ceil(buffer len / MSS)
   7817 		 * To make sure we don't overrun the FIFO, adjust the max
   7818 		 * buffer len if the MSS drops.
   7819 		 */
   7820 		dmamap->dm_maxsegsz =
   7821 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7822 		    ? m0->m_pkthdr.segsz << 2
   7823 		    : WTX_MAX_LEN;
   7824 
   7825 		/*
   7826 		 * Load the DMA map.  If this fails, the packet either
   7827 		 * didn't fit in the allotted number of segments, or we
   7828 		 * were short on resources.  For the too-many-segments
   7829 		 * case, we simply report an error and drop the packet,
   7830 		 * since we can't sanely copy a jumbo packet to a single
   7831 		 * buffer.
   7832 		 */
   7833 retry:
   7834 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7835 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7836 		if (__predict_false(error)) {
   7837 			if (error == EFBIG) {
   7838 				if (remap == true) {
   7839 					struct mbuf *m;
   7840 
   7841 					remap = false;
   7842 					m = m_defrag(m0, M_NOWAIT);
   7843 					if (m != NULL) {
   7844 						WM_Q_EVCNT_INCR(txq, defrag);
   7845 						m0 = m;
   7846 						goto retry;
   7847 					}
   7848 				}
   7849 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7850 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7851 				    "DMA segments, dropping...\n",
   7852 				    device_xname(sc->sc_dev));
   7853 				wm_dump_mbuf_chain(sc, m0);
   7854 				m_freem(m0);
   7855 				continue;
   7856 			}
   7857 			/* Short on resources, just stop for now. */
   7858 			DPRINTF(sc, WM_DEBUG_TX,
   7859 			    ("%s: TX: dmamap load failed: %d\n",
   7860 				device_xname(sc->sc_dev), error));
   7861 			break;
   7862 		}
   7863 
   7864 		segs_needed = dmamap->dm_nsegs;
   7865 		if (use_tso) {
   7866 			/* For sentinel descriptor; see below. */
   7867 			segs_needed++;
   7868 		}
   7869 
   7870 		/*
   7871 		 * Ensure we have enough descriptors free to describe
   7872 		 * the packet. Note, we always reserve one descriptor
   7873 		 * at the end of the ring due to the semantics of the
   7874 		 * TDT register, plus one more in the event we need
   7875 		 * to load offload context.
   7876 		 */
   7877 		if (segs_needed > txq->txq_free - 2) {
   7878 			/*
   7879 			 * Not enough free descriptors to transmit this
   7880 			 * packet.  We haven't committed anything yet,
   7881 			 * so just unload the DMA map, put the packet
    7882 			 * back on the queue, and punt. Notify the upper
   7883 			 * layer that there are no more slots left.
   7884 			 */
   7885 			DPRINTF(sc, WM_DEBUG_TX,
   7886 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7887 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7888 				segs_needed, txq->txq_free - 1));
   7889 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7890 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7891 			WM_Q_EVCNT_INCR(txq, txdstall);
   7892 			break;
   7893 		}
   7894 
   7895 		/*
   7896 		 * Check for 82547 Tx FIFO bug. We need to do this
   7897 		 * once we know we can transmit the packet, since we
   7898 		 * do some internal FIFO space accounting here.
   7899 		 */
   7900 		if (sc->sc_type == WM_T_82547 &&
   7901 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7902 			DPRINTF(sc, WM_DEBUG_TX,
   7903 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7904 				device_xname(sc->sc_dev)));
   7905 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7906 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7907 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7908 			break;
   7909 		}
   7910 
   7911 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7912 
   7913 		DPRINTF(sc, WM_DEBUG_TX,
   7914 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7915 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7916 
   7917 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7918 
   7919 		/*
   7920 		 * Store a pointer to the packet so that we can free it
   7921 		 * later.
   7922 		 *
   7923 		 * Initially, we consider the number of descriptors the
    7924 		 * packet uses to be the number of DMA segments.  This may be
   7925 		 * incremented by 1 if we do checksum offload (a descriptor
   7926 		 * is used to set the checksum context).
   7927 		 */
   7928 		txs->txs_mbuf = m0;
   7929 		txs->txs_firstdesc = txq->txq_next;
   7930 		txs->txs_ndesc = segs_needed;
   7931 
   7932 		/* Set up offload parameters for this packet. */
   7933 		if (m0->m_pkthdr.csum_flags &
   7934 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7935 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7936 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7937 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7938 		} else {
   7939  			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7940  			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7941 			cksumcmd = 0;
   7942 			cksumfields = 0;
   7943 		}
   7944 
   7945 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7946 
   7947 		/* Sync the DMA map. */
   7948 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7949 		    BUS_DMASYNC_PREWRITE);
   7950 
   7951 		/* Initialize the transmit descriptor. */
   7952 		for (nexttx = txq->txq_next, seg = 0;
   7953 		     seg < dmamap->dm_nsegs; seg++) {
   7954 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7955 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7956 			     seglen != 0;
   7957 			     curaddr += curlen, seglen -= curlen,
   7958 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7959 				curlen = seglen;
   7960 
   7961 				/*
   7962 				 * So says the Linux driver:
   7963 				 * Work around for premature descriptor
   7964 				 * write-backs in TSO mode.  Append a
   7965 				 * 4-byte sentinel descriptor.
   7966 				 */
   7967 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7968 				    curlen > 8)
   7969 					curlen -= 4;
   7970 
   7971 				wm_set_dma_addr(
   7972 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7973 				txq->txq_descs[nexttx].wtx_cmdlen
   7974 				    = htole32(cksumcmd | curlen);
   7975 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7976 				    = 0;
   7977 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7978 				    = cksumfields;
   7979 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7980 				lasttx = nexttx;
   7981 
   7982 				DPRINTF(sc, WM_DEBUG_TX,
   7983 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7984 					"len %#04zx\n",
   7985 					device_xname(sc->sc_dev), nexttx,
   7986 					(uint64_t)curaddr, curlen));
   7987 			}
   7988 		}
   7989 
   7990 		KASSERT(lasttx != -1);
   7991 
   7992 		/*
   7993 		 * Set up the command byte on the last descriptor of
   7994 		 * the packet. If we're in the interrupt delay window,
   7995 		 * delay the interrupt.
   7996 		 */
   7997 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7998 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7999 
   8000 		/*
   8001 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8002 		 * up the descriptor to encapsulate the packet for us.
   8003 		 *
   8004 		 * This is only valid on the last descriptor of the packet.
   8005 		 */
   8006 		if (vlan_has_tag(m0)) {
   8007 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8008 			    htole32(WTX_CMD_VLE);
   8009 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8010 			    = htole16(vlan_get_tag(m0));
   8011 		}
   8012 
   8013 		txs->txs_lastdesc = lasttx;
   8014 
   8015 		DPRINTF(sc, WM_DEBUG_TX,
   8016 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8017 			device_xname(sc->sc_dev),
   8018 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8019 
   8020 		/* Sync the descriptors we're using. */
   8021 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8022 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8023 
   8024 		/* Give the packet to the chip. */
   8025 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8026 
   8027 		DPRINTF(sc, WM_DEBUG_TX,
   8028 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8029 
   8030 		DPRINTF(sc, WM_DEBUG_TX,
   8031 		    ("%s: TX: finished transmitting packet, job %d\n",
   8032 			device_xname(sc->sc_dev), txq->txq_snext));
   8033 
   8034 		/* Advance the tx pointer. */
   8035 		txq->txq_free -= txs->txs_ndesc;
   8036 		txq->txq_next = nexttx;
   8037 
   8038 		txq->txq_sfree--;
   8039 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8040 
   8041 		/* Pass the packet to any BPF listeners. */
   8042 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8043 	}
   8044 
   8045 	if (m0 != NULL) {
   8046 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8047 		WM_Q_EVCNT_INCR(txq, descdrop);
   8048 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8049 			__func__));
   8050 		m_freem(m0);
   8051 	}
   8052 
   8053 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8054 		/* No more slots; notify upper layer. */
   8055 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8056 	}
   8057 
   8058 	if (txq->txq_free != ofree) {
   8059 		/* Set a watchdog timer in case the chip flakes out. */
   8060 		txq->txq_lastsent = time_uptime;
   8061 		txq->txq_sending = true;
   8062 	}
   8063 }
   8064 
   8065 /*
   8066  * wm_nq_tx_offload:
   8067  *
   8068  *	Set up TCP/IP checksumming parameters for the
   8069  *	specified packet, for NEWQUEUE devices
   8070  */
   8071 static void
   8072 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8073     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8074 {
   8075 	struct mbuf *m0 = txs->txs_mbuf;
   8076 	uint32_t vl_len, mssidx, cmdc;
   8077 	struct ether_header *eh;
   8078 	int offset, iphl;
   8079 
   8080 	/*
   8081 	 * XXX It would be nice if the mbuf pkthdr had offset
   8082 	 * fields for the protocol headers.
   8083 	 */
   8084 	*cmdlenp = 0;
   8085 	*fieldsp = 0;
   8086 
   8087 	eh = mtod(m0, struct ether_header *);
   8088 	switch (htons(eh->ether_type)) {
   8089 	case ETHERTYPE_IP:
   8090 	case ETHERTYPE_IPV6:
   8091 		offset = ETHER_HDR_LEN;
   8092 		break;
   8093 
   8094 	case ETHERTYPE_VLAN:
   8095 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8096 		break;
   8097 
   8098 	default:
   8099 		/* Don't support this protocol or encapsulation. */
   8100 		*do_csum = false;
   8101 		return;
   8102 	}
   8103 	*do_csum = true;
   8104 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8105 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8106 
   8107 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8108 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8109 
   8110 	if ((m0->m_pkthdr.csum_flags &
   8111 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8112 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8113 	} else {
   8114 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8115 	}
   8116 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8117 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8118 
   8119 	if (vlan_has_tag(m0)) {
   8120 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8121 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8122 		*cmdlenp |= NQTX_CMD_VLE;
   8123 	}
   8124 
   8125 	mssidx = 0;
   8126 
   8127 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8128 		int hlen = offset + iphl;
   8129 		int tcp_hlen;
   8130 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8131 
   8132 		if (__predict_false(m0->m_len <
   8133 				    (hlen + sizeof(struct tcphdr)))) {
   8134 			/*
   8135 			 * TCP/IP headers are not in the first mbuf; we need
   8136 			 * to do this the slow and painful way. Let's just
   8137 			 * hope this doesn't happen very often.
   8138 			 */
   8139 			struct tcphdr th;
   8140 
   8141 			WM_Q_EVCNT_INCR(txq, tsopain);
   8142 
   8143 			m_copydata(m0, hlen, sizeof(th), &th);
   8144 			if (v4) {
   8145 				struct ip ip;
   8146 
   8147 				m_copydata(m0, offset, sizeof(ip), &ip);
   8148 				ip.ip_len = 0;
   8149 				m_copyback(m0,
   8150 				    offset + offsetof(struct ip, ip_len),
   8151 				    sizeof(ip.ip_len), &ip.ip_len);
   8152 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8153 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8154 			} else {
   8155 				struct ip6_hdr ip6;
   8156 
   8157 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8158 				ip6.ip6_plen = 0;
   8159 				m_copyback(m0,
   8160 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8161 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8162 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8163 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8164 			}
   8165 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8166 			    sizeof(th.th_sum), &th.th_sum);
   8167 
   8168 			tcp_hlen = th.th_off << 2;
   8169 		} else {
   8170 			/*
   8171 			 * TCP/IP headers are in the first mbuf; we can do
   8172 			 * this the easy way.
   8173 			 */
   8174 			struct tcphdr *th;
   8175 
   8176 			if (v4) {
   8177 				struct ip *ip =
   8178 				    (void *)(mtod(m0, char *) + offset);
   8179 				th = (void *)(mtod(m0, char *) + hlen);
   8180 
   8181 				ip->ip_len = 0;
   8182 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8183 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8184 			} else {
   8185 				struct ip6_hdr *ip6 =
   8186 				    (void *)(mtod(m0, char *) + offset);
   8187 				th = (void *)(mtod(m0, char *) + hlen);
   8188 
   8189 				ip6->ip6_plen = 0;
   8190 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8191 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8192 			}
   8193 			tcp_hlen = th->th_off << 2;
   8194 		}
   8195 		hlen += tcp_hlen;
   8196 		*cmdlenp |= NQTX_CMD_TSE;
   8197 
   8198 		if (v4) {
   8199 			WM_Q_EVCNT_INCR(txq, tso);
   8200 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8201 		} else {
   8202 			WM_Q_EVCNT_INCR(txq, tso6);
   8203 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8204 		}
   8205 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8206 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8207 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8208 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8209 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8210 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8211 	} else {
   8212 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8213 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8214 	}
   8215 
   8216 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8217 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8218 		cmdc |= NQTXC_CMD_IP4;
   8219 	}
   8220 
   8221 	if (m0->m_pkthdr.csum_flags &
   8222 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8223 		WM_Q_EVCNT_INCR(txq, tusum);
   8224 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8225 			cmdc |= NQTXC_CMD_TCP;
   8226 		else
   8227 			cmdc |= NQTXC_CMD_UDP;
   8228 
   8229 		cmdc |= NQTXC_CMD_IP4;
   8230 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8231 	}
   8232 	if (m0->m_pkthdr.csum_flags &
   8233 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8234 		WM_Q_EVCNT_INCR(txq, tusum6);
   8235 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8236 			cmdc |= NQTXC_CMD_TCP;
   8237 		else
   8238 			cmdc |= NQTXC_CMD_UDP;
   8239 
   8240 		cmdc |= NQTXC_CMD_IP6;
   8241 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8242 	}
   8243 
   8244 	/*
    8245 	 * We don't have to write a context descriptor for every packet on
    8246 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8247 	 * I210 and I211; it is enough to write one per Tx queue for these
    8248 	 * controllers.
    8249 	 * Writing a context descriptor for every packet adds overhead,
    8250 	 * but it does not cause problems.
   8251 	 */
   8252 	/* Fill in the context descriptor. */
   8253 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8254 	    htole32(vl_len);
   8255 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8256 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8257 	    htole32(cmdc);
   8258 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8259 	    htole32(mssidx);
   8260 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8261 	DPRINTF(sc, WM_DEBUG_TX,
   8262 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8263 		txq->txq_next, 0, vl_len));
   8264 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8265 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8266 	txs->txs_ndesc++;
   8267 }
   8268 
   8269 /*
   8270  * wm_nq_start:		[ifnet interface function]
   8271  *
   8272  *	Start packet transmission on the interface for NEWQUEUE devices
   8273  */
   8274 static void
   8275 wm_nq_start(struct ifnet *ifp)
   8276 {
   8277 	struct wm_softc *sc = ifp->if_softc;
   8278 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8279 
   8280 #ifdef WM_MPSAFE
   8281 	KASSERT(if_is_mpsafe(ifp));
   8282 #endif
   8283 	/*
   8284 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8285 	 */
   8286 
   8287 	mutex_enter(txq->txq_lock);
   8288 	if (!txq->txq_stopping)
   8289 		wm_nq_start_locked(ifp);
   8290 	mutex_exit(txq->txq_lock);
   8291 }
   8292 
   8293 static void
   8294 wm_nq_start_locked(struct ifnet *ifp)
   8295 {
   8296 	struct wm_softc *sc = ifp->if_softc;
   8297 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8298 
   8299 	wm_nq_send_common_locked(ifp, txq, false);
   8300 }
   8301 
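         /*
          * wm_nq_transmit:	[ifnet interface function]
          *
          *	Queue the packet on the Tx queue chosen by wm_select_txqueue()
          *	for NEWQUEUE devices and kick the transmitter if the queue
          *	lock is uncontended
          */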
   8302 static int
   8303 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8304 {
   8305 	int qid;
   8306 	struct wm_softc *sc = ifp->if_softc;
   8307 	struct wm_txqueue *txq;
   8308 
   8309 	qid = wm_select_txqueue(ifp, m);
   8310 	txq = &sc->sc_queue[qid].wmq_txq;
   8311 
   8312 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8313 		m_freem(m);
   8314 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8315 		return ENOBUFS;
   8316 	}
   8317 
   8318 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8319 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8320 	if (m->m_flags & M_MCAST)
   8321 		if_statinc_ref(nsr, if_omcasts);
   8322 	IF_STAT_PUTREF(ifp);
   8323 
   8324 	/*
    8325 	 * There are two situations in which this mutex_tryenter() can
    8326 	 * fail at run time:
    8327 	 *     (1) contention with the interrupt handler
    8328 	 *         (wm_txrxintr_msix())
    8329 	 *     (2) contention with the deferred if_start softint
    8330 	 *         (wm_handle_queue())
    8331 	 * In either case, the last packet enqueued to txq->txq_interq
    8332 	 * is eventually dequeued by wm_deferred_start_locked(), so the
    8333 	 * packet does not get stuck.
   8334 	 */
   8335 	if (mutex_tryenter(txq->txq_lock)) {
   8336 		if (!txq->txq_stopping)
   8337 			wm_nq_transmit_locked(ifp, txq);
   8338 		mutex_exit(txq->txq_lock);
   8339 	}
   8340 
   8341 	return 0;
   8342 }
   8343 
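         /*
          * wm_nq_transmit_locked:
          *
          *	Helper; drain the given Tx queue's pcq with txq_lock held,
          *	for NEWQUEUE devices
          */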
   8344 static void
   8345 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8346 {
   8347 
   8348 	wm_nq_send_common_locked(ifp, txq, true);
   8349 }
   8350 
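         /*
          * wm_nq_send_common_locked:
          *
          *	Common transmit loop for wm_nq_start_locked() and
          *	wm_nq_transmit_locked(); dequeues packets and sets up legacy
          *	or advanced Tx descriptors until the send queue drains or
          *	the descriptor ring fills up
          */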
   8351 static void
   8352 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8353     bool is_transmit)
   8354 {
   8355 	struct wm_softc *sc = ifp->if_softc;
   8356 	struct mbuf *m0;
   8357 	struct wm_txsoft *txs;
   8358 	bus_dmamap_t dmamap;
   8359 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8360 	bool do_csum, sent;
   8361 	bool remap = true;
   8362 
   8363 	KASSERT(mutex_owned(txq->txq_lock));
   8364 
   8365 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8366 		return;
   8367 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8368 		return;
   8369 
   8370 	sent = false;
   8371 
   8372 	/*
   8373 	 * Loop through the send queue, setting up transmit descriptors
   8374 	 * until we drain the queue, or use up all available transmit
   8375 	 * descriptors.
   8376 	 */
   8377 	for (;;) {
   8378 		m0 = NULL;
   8379 
   8380 		/* Get a work queue entry. */
   8381 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8382 			wm_txeof(txq, UINT_MAX);
   8383 			if (txq->txq_sfree == 0) {
   8384 				DPRINTF(sc, WM_DEBUG_TX,
   8385 				    ("%s: TX: no free job descriptors\n",
   8386 					device_xname(sc->sc_dev)));
   8387 				WM_Q_EVCNT_INCR(txq, txsstall);
   8388 				break;
   8389 			}
   8390 		}
   8391 
   8392 		/* Grab a packet off the queue. */
   8393 		if (is_transmit)
   8394 			m0 = pcq_get(txq->txq_interq);
   8395 		else
   8396 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8397 		if (m0 == NULL)
   8398 			break;
   8399 
   8400 		DPRINTF(sc, WM_DEBUG_TX,
   8401 		    ("%s: TX: have packet to transmit: %p\n",
   8402 		    device_xname(sc->sc_dev), m0));
   8403 
   8404 		txs = &txq->txq_soft[txq->txq_snext];
   8405 		dmamap = txs->txs_dmamap;
   8406 
   8407 		/*
   8408 		 * Load the DMA map.  If this fails, the packet either
   8409 		 * didn't fit in the allotted number of segments, or we
   8410 		 * were short on resources.  For the too-many-segments
   8411 		 * case, we simply report an error and drop the packet,
   8412 		 * since we can't sanely copy a jumbo packet to a single
   8413 		 * buffer.
   8414 		 */
   8415 retry:
   8416 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8417 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8418 		if (__predict_false(error)) {
   8419 			if (error == EFBIG) {
   8420 				if (remap == true) {
   8421 					struct mbuf *m;
   8422 
   8423 					remap = false;
   8424 					m = m_defrag(m0, M_NOWAIT);
   8425 					if (m != NULL) {
   8426 						WM_Q_EVCNT_INCR(txq, defrag);
   8427 						m0 = m;
   8428 						goto retry;
   8429 					}
   8430 				}
   8431 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8432 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8433 				    "DMA segments, dropping...\n",
   8434 				    device_xname(sc->sc_dev));
   8435 				wm_dump_mbuf_chain(sc, m0);
   8436 				m_freem(m0);
   8437 				continue;
   8438 			}
   8439 			/* Short on resources, just stop for now. */
   8440 			DPRINTF(sc, WM_DEBUG_TX,
   8441 			    ("%s: TX: dmamap load failed: %d\n",
   8442 				device_xname(sc->sc_dev), error));
   8443 			break;
   8444 		}
   8445 
   8446 		segs_needed = dmamap->dm_nsegs;
   8447 
   8448 		/*
   8449 		 * Ensure we have enough descriptors free to describe
   8450 		 * the packet. Note, we always reserve one descriptor
   8451 		 * at the end of the ring due to the semantics of the
   8452 		 * TDT register, plus one more in the event we need
   8453 		 * to load offload context.
   8454 		 */
   8455 		if (segs_needed > txq->txq_free - 2) {
   8456 			/*
   8457 			 * Not enough free descriptors to transmit this
   8458 			 * packet.  We haven't committed anything yet,
   8459 			 * so just unload the DMA map, put the packet
    8460 			 * back on the queue, and punt. Notify the upper
   8461 			 * layer that there are no more slots left.
   8462 			 */
   8463 			DPRINTF(sc, WM_DEBUG_TX,
   8464 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8465 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8466 				segs_needed, txq->txq_free - 1));
   8467 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8468 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8469 			WM_Q_EVCNT_INCR(txq, txdstall);
   8470 			break;
   8471 		}
   8472 
   8473 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8474 
   8475 		DPRINTF(sc, WM_DEBUG_TX,
   8476 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8477 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8478 
   8479 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8480 
   8481 		/*
   8482 		 * Store a pointer to the packet so that we can free it
   8483 		 * later.
   8484 		 *
   8485 		 * Initially, we consider the number of descriptors the
    8486 		 * packet uses to be the number of DMA segments.  This may be
   8487 		 * incremented by 1 if we do checksum offload (a descriptor
   8488 		 * is used to set the checksum context).
   8489 		 */
   8490 		txs->txs_mbuf = m0;
   8491 		txs->txs_firstdesc = txq->txq_next;
   8492 		txs->txs_ndesc = segs_needed;
   8493 
   8494 		/* Set up offload parameters for this packet. */
   8495 		uint32_t cmdlen, fields, dcmdlen;
   8496 		if (m0->m_pkthdr.csum_flags &
   8497 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8498 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8499 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8500 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8501 			    &do_csum);
   8502 		} else {
   8503 			do_csum = false;
   8504 			cmdlen = 0;
   8505 			fields = 0;
   8506 		}
   8507 
   8508 		/* Sync the DMA map. */
   8509 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8510 		    BUS_DMASYNC_PREWRITE);
   8511 
   8512 		/* Initialize the first transmit descriptor. */
   8513 		nexttx = txq->txq_next;
   8514 		if (!do_csum) {
   8515 			/* Setup a legacy descriptor */
   8516 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8517 			    dmamap->dm_segs[0].ds_addr);
   8518 			txq->txq_descs[nexttx].wtx_cmdlen =
   8519 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8520 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8521 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8522 			if (vlan_has_tag(m0)) {
   8523 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8524 				    htole32(WTX_CMD_VLE);
   8525 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8526 				    htole16(vlan_get_tag(m0));
   8527 			} else
   8528 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8529 
   8530 			dcmdlen = 0;
   8531 		} else {
   8532 			/* Setup an advanced data descriptor */
   8533 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8534 			    htole64(dmamap->dm_segs[0].ds_addr);
   8535 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8536 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8537 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8538 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8539 			    htole32(fields);
   8540 			DPRINTF(sc, WM_DEBUG_TX,
   8541 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8542 				device_xname(sc->sc_dev), nexttx,
   8543 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8544 			DPRINTF(sc, WM_DEBUG_TX,
   8545 			    ("\t 0x%08x%08x\n", fields,
   8546 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8547 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8548 		}
   8549 
   8550 		lasttx = nexttx;
   8551 		nexttx = WM_NEXTTX(txq, nexttx);
   8552 		/*
    8553 		 * Fill in the next descriptors. The legacy and advanced
    8554 		 * formats are the same from here on.
   8555 		 */
   8556 		for (seg = 1; seg < dmamap->dm_nsegs;
   8557 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8558 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8559 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8560 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8561 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8562 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8563 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8564 			lasttx = nexttx;
   8565 
   8566 			DPRINTF(sc, WM_DEBUG_TX,
   8567 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8568 				device_xname(sc->sc_dev), nexttx,
   8569 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8570 				dmamap->dm_segs[seg].ds_len));
   8571 		}
   8572 
   8573 		KASSERT(lasttx != -1);
   8574 
   8575 		/*
   8576 		 * Set up the command byte on the last descriptor of
   8577 		 * the packet. If we're in the interrupt delay window,
   8578 		 * delay the interrupt.
   8579 		 */
   8580 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8581 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8582 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8583 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8584 
   8585 		txs->txs_lastdesc = lasttx;
   8586 
   8587 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8588 		    device_xname(sc->sc_dev),
   8589 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8590 
   8591 		/* Sync the descriptors we're using. */
   8592 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8593 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8594 
   8595 		/* Give the packet to the chip. */
   8596 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8597 		sent = true;
   8598 
   8599 		DPRINTF(sc, WM_DEBUG_TX,
   8600 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8601 
   8602 		DPRINTF(sc, WM_DEBUG_TX,
   8603 		    ("%s: TX: finished transmitting packet, job %d\n",
   8604 			device_xname(sc->sc_dev), txq->txq_snext));
   8605 
   8606 		/* Advance the tx pointer. */
   8607 		txq->txq_free -= txs->txs_ndesc;
   8608 		txq->txq_next = nexttx;
   8609 
   8610 		txq->txq_sfree--;
   8611 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8612 
   8613 		/* Pass the packet to any BPF listeners. */
   8614 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8615 	}
   8616 
   8617 	if (m0 != NULL) {
   8618 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8619 		WM_Q_EVCNT_INCR(txq, descdrop);
   8620 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8621 			__func__));
   8622 		m_freem(m0);
   8623 	}
   8624 
   8625 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8626 		/* No more slots; notify upper layer. */
   8627 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8628 	}
   8629 
   8630 	if (sent) {
   8631 		/* Set a watchdog timer in case the chip flakes out. */
   8632 		txq->txq_lastsent = time_uptime;
   8633 		txq->txq_sending = true;
   8634 	}
   8635 }
   8636 
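         /*
          * wm_deferred_start_locked:
          *
          *	Helper; restart transmission with txq_lock held, dispatching
          *	to the NEWQUEUE or legacy start/transmit routines as
          *	appropriate
          */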
   8637 static void
   8638 wm_deferred_start_locked(struct wm_txqueue *txq)
   8639 {
   8640 	struct wm_softc *sc = txq->txq_sc;
   8641 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8642 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8643 	int qid = wmq->wmq_id;
   8644 
   8645 	KASSERT(mutex_owned(txq->txq_lock));
   8646 
   8647 	if (txq->txq_stopping) {
   8648 		mutex_exit(txq->txq_lock);
   8649 		return;
   8650 	}
   8651 
   8652 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8653 		/* XXX needed for ALTQ or single-CPU systems */
   8654 		if (qid == 0)
   8655 			wm_nq_start_locked(ifp);
   8656 		wm_nq_transmit_locked(ifp, txq);
   8657 	} else {
    8658 		/* XXX needed for ALTQ or single-CPU systems */
   8659 		if (qid == 0)
   8660 			wm_start_locked(ifp);
   8661 		wm_transmit_locked(ifp, txq);
   8662 	}
   8663 }
   8664 
   8665 /* Interrupt */
   8666 
   8667 /*
   8668  * wm_txeof:
   8669  *
   8670  *	Helper; handle transmit interrupts.
   8671  */
   8672 static bool
   8673 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8674 {
   8675 	struct wm_softc *sc = txq->txq_sc;
   8676 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8677 	struct wm_txsoft *txs;
   8678 	int count = 0;
   8679 	int i;
   8680 	uint8_t status;
   8681 	bool more = false;
   8682 
   8683 	KASSERT(mutex_owned(txq->txq_lock));
   8684 
   8685 	if (txq->txq_stopping)
   8686 		return false;
   8687 
   8688 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8689 
   8690 	/*
   8691 	 * Go through the Tx list and free mbufs for those
   8692 	 * frames which have been transmitted.
   8693 	 */
   8694 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8695 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8696 		if (limit-- == 0) {
   8697 			more = true;
   8698 			DPRINTF(sc, WM_DEBUG_TX,
   8699 			    ("%s: TX: loop limited, job %d is not processed\n",
   8700 				device_xname(sc->sc_dev), i));
   8701 			break;
   8702 		}
   8703 
   8704 		txs = &txq->txq_soft[i];
   8705 
   8706 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8707 			device_xname(sc->sc_dev), i));
   8708 
   8709 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8710 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8711 
   8712 		status =
   8713 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8714 		if ((status & WTX_ST_DD) == 0) {
   8715 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8716 			    BUS_DMASYNC_PREREAD);
   8717 			break;
   8718 		}
   8719 
   8720 		count++;
   8721 		DPRINTF(sc, WM_DEBUG_TX,
   8722 		    ("%s: TX: job %d done: descs %d..%d\n",
   8723 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8724 		    txs->txs_lastdesc));
   8725 
   8726 		/*
   8727 		 * XXX We should probably be using the statistics
   8728 		 * XXX registers, but I don't know if they exist
   8729 		 * XXX on chips before the i82544.
   8730 		 */
   8731 
   8732 #ifdef WM_EVENT_COUNTERS
   8733 		if (status & WTX_ST_TU)
   8734 			WM_Q_EVCNT_INCR(txq, underrun);
   8735 #endif /* WM_EVENT_COUNTERS */
   8736 
   8737 		/*
    8738 		 * The documents for the 82574 and newer say the status field
    8739 		 * has neither the EC (Excessive Collision) bit nor the LC
    8740 		 * (Late Collision) bit (both are reserved). Refer to the "PCIe
    8741 		 * GbE Controller Open Source Software Developer's Manual",
    8742 		 * the 82574 datasheet and newer.
    8743 		 *
    8744 		 * XXX I saw the LC bit set on I218 even though the media was
    8745 		 * full duplex; the bit might mean something else (no document).
   8746 		 */
   8747 
   8748 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8749 		    && ((sc->sc_type < WM_T_82574)
   8750 			|| (sc->sc_type == WM_T_80003))) {
   8751 			if_statinc(ifp, if_oerrors);
   8752 			if (status & WTX_ST_LC)
   8753 				log(LOG_WARNING, "%s: late collision\n",
   8754 				    device_xname(sc->sc_dev));
   8755 			else if (status & WTX_ST_EC) {
   8756 				if_statadd(ifp, if_collisions,
   8757 				    TX_COLLISION_THRESHOLD + 1);
   8758 				log(LOG_WARNING, "%s: excessive collisions\n",
   8759 				    device_xname(sc->sc_dev));
   8760 			}
   8761 		} else
   8762 			if_statinc(ifp, if_opackets);
   8763 
   8764 		txq->txq_packets++;
   8765 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8766 
   8767 		txq->txq_free += txs->txs_ndesc;
   8768 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8769 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8770 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8771 		m_freem(txs->txs_mbuf);
   8772 		txs->txs_mbuf = NULL;
   8773 	}
   8774 
   8775 	/* Update the dirty transmit buffer pointer. */
   8776 	txq->txq_sdirty = i;
   8777 	DPRINTF(sc, WM_DEBUG_TX,
   8778 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8779 
   8780 	if (count != 0)
   8781 		rnd_add_uint32(&sc->rnd_source, count);
   8782 
   8783 	/*
   8784 	 * If there are no more pending transmissions, cancel the watchdog
   8785 	 * timer.
   8786 	 */
   8787 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8788 		txq->txq_sending = false;
   8789 
   8790 	return more;
   8791 }
   8792 
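         /*
          * The inline helpers below hide the three receive descriptor
          * formats from wm_rxeof(): the 82574 uses extended descriptors,
          * NEWQUEUE (82575 and newer) devices use advanced descriptors,
          * and all other devices use legacy descriptors.
          */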
   8793 static inline uint32_t
   8794 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8795 {
   8796 	struct wm_softc *sc = rxq->rxq_sc;
   8797 
   8798 	if (sc->sc_type == WM_T_82574)
   8799 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8800 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8801 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8802 	else
   8803 		return rxq->rxq_descs[idx].wrx_status;
   8804 }
   8805 
   8806 static inline uint32_t
   8807 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8808 {
   8809 	struct wm_softc *sc = rxq->rxq_sc;
   8810 
   8811 	if (sc->sc_type == WM_T_82574)
   8812 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8813 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8814 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8815 	else
   8816 		return rxq->rxq_descs[idx].wrx_errors;
   8817 }
   8818 
   8819 static inline uint16_t
   8820 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8821 {
   8822 	struct wm_softc *sc = rxq->rxq_sc;
   8823 
   8824 	if (sc->sc_type == WM_T_82574)
   8825 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8826 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8827 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8828 	else
   8829 		return rxq->rxq_descs[idx].wrx_special;
   8830 }
   8831 
   8832 static inline int
   8833 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8834 {
   8835 	struct wm_softc *sc = rxq->rxq_sc;
   8836 
   8837 	if (sc->sc_type == WM_T_82574)
   8838 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8839 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8840 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8841 	else
   8842 		return rxq->rxq_descs[idx].wrx_len;
   8843 }
   8844 
   8845 #ifdef WM_DEBUG
   8846 static inline uint32_t
   8847 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8848 {
   8849 	struct wm_softc *sc = rxq->rxq_sc;
   8850 
   8851 	if (sc->sc_type == WM_T_82574)
   8852 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8853 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8854 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8855 	else
   8856 		return 0;
   8857 }
   8858 
   8859 static inline uint8_t
   8860 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8861 {
   8862 	struct wm_softc *sc = rxq->rxq_sc;
   8863 
   8864 	if (sc->sc_type == WM_T_82574)
   8865 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8866 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8867 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8868 	else
   8869 		return 0;
   8870 }
   8871 #endif /* WM_DEBUG */
   8872 
   8873 static inline bool
   8874 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8875     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8876 {
   8877 
   8878 	if (sc->sc_type == WM_T_82574)
   8879 		return (status & ext_bit) != 0;
   8880 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8881 		return (status & nq_bit) != 0;
   8882 	else
   8883 		return (status & legacy_bit) != 0;
   8884 }
   8885 
   8886 static inline bool
   8887 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8888     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8889 {
   8890 
   8891 	if (sc->sc_type == WM_T_82574)
   8892 		return (error & ext_bit) != 0;
   8893 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8894 		return (error & nq_bit) != 0;
   8895 	else
   8896 		return (error & legacy_bit) != 0;
   8897 }
   8898 
   8899 static inline bool
   8900 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8901 {
   8902 
   8903 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8904 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8905 		return true;
   8906 	else
   8907 		return false;
   8908 }
   8909 
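         /*
          * wm_rxdesc_has_errors:
          *
          *	Helper; log any symbol, sequence or CRC errors reported in
          *	the descriptor and return true if the frame must be dropped
          */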
   8910 static inline bool
   8911 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8912 {
   8913 	struct wm_softc *sc = rxq->rxq_sc;
   8914 
   8915 	/* XXX missing error bit for newqueue? */
   8916 	if (wm_rxdesc_is_set_error(sc, errors,
   8917 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8918 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8919 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8920 		NQRXC_ERROR_RXE)) {
   8921 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8922 		    EXTRXC_ERROR_SE, 0))
   8923 			log(LOG_WARNING, "%s: symbol error\n",
   8924 			    device_xname(sc->sc_dev));
   8925 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8926 		    EXTRXC_ERROR_SEQ, 0))
   8927 			log(LOG_WARNING, "%s: receive sequence error\n",
   8928 			    device_xname(sc->sc_dev));
   8929 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8930 		    EXTRXC_ERROR_CE, 0))
   8931 			log(LOG_WARNING, "%s: CRC error\n",
   8932 			    device_xname(sc->sc_dev));
   8933 		return true;
   8934 	}
   8935 
   8936 	return false;
   8937 }
   8938 
   8939 static inline bool
   8940 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8941 {
   8942 	struct wm_softc *sc = rxq->rxq_sc;
   8943 
   8944 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8945 		NQRXC_STATUS_DD)) {
   8946 		/* We have processed all of the receive descriptors. */
   8947 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8948 		return false;
   8949 	}
   8950 
   8951 	return true;
   8952 }
   8953 
   8954 static inline bool
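         /*
          * wm_rxdesc_input_vlantag:
          *
          *	Helper; if the VP bit is set in the descriptor status,
          *	attach the hardware-extracted VLAN tag to the mbuf
          */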
   8955 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8956     uint16_t vlantag, struct mbuf *m)
   8957 {
   8958 
   8959 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8960 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8961 		vlan_set_tag(m, le16toh(vlantag));
   8962 	}
   8963 
   8964 	return true;
   8965 }
   8966 
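         /*
          * wm_rxdesc_ensure_checksum:
          *
          *	Helper; translate the descriptor's checksum status and error
          *	bits into mbuf checksum-offload flags
          */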
   8967 static inline void
   8968 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8969     uint32_t errors, struct mbuf *m)
   8970 {
   8971 	struct wm_softc *sc = rxq->rxq_sc;
   8972 
   8973 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8974 		if (wm_rxdesc_is_set_status(sc, status,
   8975 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8976 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8977 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8978 			if (wm_rxdesc_is_set_error(sc, errors,
   8979 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8980 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8981 		}
   8982 		if (wm_rxdesc_is_set_status(sc, status,
   8983 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8984 			/*
   8985 			 * Note: we don't know if this was TCP or UDP,
   8986 			 * so we just set both bits, and expect the
   8987 			 * upper layers to deal.
   8988 			 */
   8989 			WM_Q_EVCNT_INCR(rxq, tusum);
   8990 			m->m_pkthdr.csum_flags |=
   8991 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8992 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8993 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8994 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8995 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8996 		}
   8997 	}
   8998 }
   8999 
   9000 /*
   9001  * wm_rxeof:
   9002  *
   9003  *	Helper; handle receive interrupts.
   9004  */
   9005 static bool
   9006 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9007 {
   9008 	struct wm_softc *sc = rxq->rxq_sc;
   9009 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9010 	struct wm_rxsoft *rxs;
   9011 	struct mbuf *m;
   9012 	int i, len;
   9013 	int count = 0;
   9014 	uint32_t status, errors;
   9015 	uint16_t vlantag;
   9016 	bool more = false;
   9017 
   9018 	KASSERT(mutex_owned(rxq->rxq_lock));
   9019 
   9020 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9021 		if (limit-- == 0) {
   9022 			rxq->rxq_ptr = i;
   9023 			more = true;
   9024 			DPRINTF(sc, WM_DEBUG_RX,
   9025 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9026 				device_xname(sc->sc_dev), i));
   9027 			break;
   9028 		}
   9029 
   9030 		rxs = &rxq->rxq_soft[i];
   9031 
   9032 		DPRINTF(sc, WM_DEBUG_RX,
   9033 		    ("%s: RX: checking descriptor %d\n",
   9034 			device_xname(sc->sc_dev), i));
   9035 		wm_cdrxsync(rxq, i,
   9036 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9037 
   9038 		status = wm_rxdesc_get_status(rxq, i);
   9039 		errors = wm_rxdesc_get_errors(rxq, i);
   9040 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9041 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9042 #ifdef WM_DEBUG
   9043 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9044 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9045 #endif
   9046 
   9047 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9048 			/*
    9049 			 * Update the receive pointer while still holding
    9050 			 * rxq_lock, consistent with the per-queue counters.
   9051 			 */
   9052 			rxq->rxq_ptr = i;
   9053 			break;
   9054 		}
   9055 
   9056 		count++;
   9057 		if (__predict_false(rxq->rxq_discard)) {
   9058 			DPRINTF(sc, WM_DEBUG_RX,
   9059 			    ("%s: RX: discarding contents of descriptor %d\n",
   9060 				device_xname(sc->sc_dev), i));
   9061 			wm_init_rxdesc(rxq, i);
   9062 			if (wm_rxdesc_is_eop(rxq, status)) {
   9063 				/* Reset our state. */
   9064 				DPRINTF(sc, WM_DEBUG_RX,
   9065 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9066 					device_xname(sc->sc_dev)));
   9067 				rxq->rxq_discard = 0;
   9068 			}
   9069 			continue;
   9070 		}
   9071 
   9072 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9073 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9074 
   9075 		m = rxs->rxs_mbuf;
   9076 
   9077 		/*
   9078 		 * Add a new receive buffer to the ring, unless of
   9079 		 * course the length is zero. Treat the latter as a
   9080 		 * failed mapping.
   9081 		 */
   9082 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9083 			/*
   9084 			 * Failed, throw away what we've done so
   9085 			 * far, and discard the rest of the packet.
   9086 			 */
   9087 			if_statinc(ifp, if_ierrors);
   9088 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9089 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9090 			wm_init_rxdesc(rxq, i);
   9091 			if (!wm_rxdesc_is_eop(rxq, status))
   9092 				rxq->rxq_discard = 1;
   9093 			if (rxq->rxq_head != NULL)
   9094 				m_freem(rxq->rxq_head);
   9095 			WM_RXCHAIN_RESET(rxq);
   9096 			DPRINTF(sc, WM_DEBUG_RX,
   9097 			    ("%s: RX: Rx buffer allocation failed, "
   9098 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9099 				rxq->rxq_discard ? " (discard)" : ""));
   9100 			continue;
   9101 		}
   9102 
   9103 		m->m_len = len;
   9104 		rxq->rxq_len += len;
   9105 		DPRINTF(sc, WM_DEBUG_RX,
   9106 		    ("%s: RX: buffer at %p len %d\n",
   9107 			device_xname(sc->sc_dev), m->m_data, len));
   9108 
   9109 		/* If this is not the end of the packet, keep looking. */
   9110 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9111 			WM_RXCHAIN_LINK(rxq, m);
   9112 			DPRINTF(sc, WM_DEBUG_RX,
   9113 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9114 				device_xname(sc->sc_dev), rxq->rxq_len));
   9115 			continue;
   9116 		}
   9117 
   9118 		/*
    9119 		 * Okay, we have the entire packet now. The chip is configured
    9120 		 * to include the FCS (not all chips can be configured to strip
    9121 		 * it), so we normally need to trim it. The exceptions are the
    9122 		 * I35[04] and I21[01]: those chips have an erratum whereby the
    9123 		 * RCTL_SECRC bit in the RCTL register is always set, so we
    9124 		 * don't trim the FCS on them. PCH2 and newer chips also do not
    9125 		 * include the FCS when jumbo frames are used, to work around
    9126 		 * an erratum. We may need to adjust the length of the previous
    9127 		 * mbuf in the chain if the current mbuf is too short.
   9128 		 */
   9129 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9130 			if (m->m_len < ETHER_CRC_LEN) {
   9131 				rxq->rxq_tail->m_len
   9132 				    -= (ETHER_CRC_LEN - m->m_len);
   9133 				m->m_len = 0;
   9134 			} else
   9135 				m->m_len -= ETHER_CRC_LEN;
   9136 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9137 		} else
   9138 			len = rxq->rxq_len;
   9139 
   9140 		WM_RXCHAIN_LINK(rxq, m);
   9141 
   9142 		*rxq->rxq_tailp = NULL;
   9143 		m = rxq->rxq_head;
   9144 
   9145 		WM_RXCHAIN_RESET(rxq);
   9146 
   9147 		DPRINTF(sc, WM_DEBUG_RX,
   9148 		    ("%s: RX: have entire packet, len -> %d\n",
   9149 			device_xname(sc->sc_dev), len));
   9150 
   9151 		/* If an error occurred, update stats and drop the packet. */
   9152 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9153 			m_freem(m);
   9154 			continue;
   9155 		}
   9156 
   9157 		/* No errors.  Receive the packet. */
   9158 		m_set_rcvif(m, ifp);
   9159 		m->m_pkthdr.len = len;
   9160 		/*
    9161 		 * TODO
    9162 		 * The rsshash and rsstype should be saved in this mbuf.
   9163 		 */
   9164 		DPRINTF(sc, WM_DEBUG_RX,
   9165 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9166 			device_xname(sc->sc_dev), rsstype, rsshash));
   9167 
   9168 		/*
   9169 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9170 		 * for us.  Associate the tag with the packet.
   9171 		 */
   9172 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9173 			continue;
   9174 
   9175 		/* Set up checksum info for this packet. */
   9176 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9177 		/*
    9178 		 * Update the receive pointer while still holding rxq_lock,
    9179 		 * consistent with the per-queue counters.
   9180 		 */
   9181 		rxq->rxq_ptr = i;
   9182 		rxq->rxq_packets++;
   9183 		rxq->rxq_bytes += len;
   9184 		mutex_exit(rxq->rxq_lock);
   9185 
   9186 		/* Pass it on. */
   9187 		if_percpuq_enqueue(sc->sc_ipq, m);
   9188 
   9189 		mutex_enter(rxq->rxq_lock);
   9190 
   9191 		if (rxq->rxq_stopping)
   9192 			break;
   9193 	}
   9194 
   9195 	if (count != 0)
   9196 		rnd_add_uint32(&sc->rnd_source, count);
   9197 
   9198 	DPRINTF(sc, WM_DEBUG_RX,
   9199 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9200 
   9201 	return more;
   9202 }
   9203 
   9204 /*
   9205  * wm_linkintr_gmii:
   9206  *
   9207  *	Helper; handle link interrupts for GMII.
   9208  */
   9209 static void
   9210 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9211 {
   9212 	device_t dev = sc->sc_dev;
   9213 	uint32_t status, reg;
   9214 	bool link;
   9215 	int rv;
   9216 
   9217 	KASSERT(WM_CORE_LOCKED(sc));
   9218 
   9219 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9220 		__func__));
   9221 
   9222 	if ((icr & ICR_LSC) == 0) {
   9223 		if (icr & ICR_RXSEQ)
   9224 			DPRINTF(sc, WM_DEBUG_LINK,
    9225 			    ("%s: LINK: Receive sequence error\n",
   9226 				device_xname(dev)));
   9227 		return;
   9228 	}
   9229 
   9230 	/* Link status changed */
   9231 	status = CSR_READ(sc, WMREG_STATUS);
   9232 	link = status & STATUS_LU;
   9233 	if (link) {
   9234 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9235 			device_xname(dev),
   9236 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9237 	} else {
   9238 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9239 			device_xname(dev)));
   9240 	}
   9241 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9242 		wm_gig_downshift_workaround_ich8lan(sc);
   9243 
   9244 	if ((sc->sc_type == WM_T_ICH8)
   9245 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9246 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9247 	}
   9248 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9249 		device_xname(dev)));
   9250 	mii_pollstat(&sc->sc_mii);
   9251 	if (sc->sc_type == WM_T_82543) {
   9252 		int miistatus, active;
   9253 
   9254 		/*
   9255 		 * With 82543, we need to force speed and
   9256 		 * duplex on the MAC equal to what the PHY
   9257 		 * speed and duplex configuration is.
   9258 		 */
   9259 		miistatus = sc->sc_mii.mii_media_status;
   9260 
   9261 		if (miistatus & IFM_ACTIVE) {
   9262 			active = sc->sc_mii.mii_media_active;
   9263 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9264 			switch (IFM_SUBTYPE(active)) {
   9265 			case IFM_10_T:
   9266 				sc->sc_ctrl |= CTRL_SPEED_10;
   9267 				break;
   9268 			case IFM_100_TX:
   9269 				sc->sc_ctrl |= CTRL_SPEED_100;
   9270 				break;
   9271 			case IFM_1000_T:
   9272 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9273 				break;
   9274 			default:
   9275 				/*
   9276 				 * Fiber?
    9277 				 * Should not enter here.
   9278 				 */
   9279 				device_printf(dev, "unknown media (%x)\n",
   9280 				    active);
   9281 				break;
   9282 			}
   9283 			if (active & IFM_FDX)
   9284 				sc->sc_ctrl |= CTRL_FD;
   9285 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9286 		}
   9287 	} else if (sc->sc_type == WM_T_PCH) {
   9288 		wm_k1_gig_workaround_hv(sc,
   9289 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9290 	}
   9291 
   9292 	/*
   9293 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9294 	 * aggressive resulting in many collisions. To avoid this, increase
   9295 	 * the IPG and reduce Rx latency in the PHY.
   9296 	 */
   9297 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9298 	    && link) {
   9299 		uint32_t tipg_reg;
   9300 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9301 		bool fdx;
   9302 		uint16_t emi_addr, emi_val;
   9303 
   9304 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9305 		tipg_reg &= ~TIPG_IPGT_MASK;
   9306 		fdx = status & STATUS_FD;
   9307 
   9308 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9309 			tipg_reg |= 0xff;
   9310 			/* Reduce Rx latency in analog PHY */
   9311 			emi_val = 0;
   9312 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9313 		    fdx && speed != STATUS_SPEED_1000) {
   9314 			tipg_reg |= 0xc;
   9315 			emi_val = 1;
   9316 		} else {
   9317 			/* Roll back the default values */
   9318 			tipg_reg |= 0x08;
   9319 			emi_val = 1;
   9320 		}
   9321 
   9322 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9323 
   9324 		rv = sc->phy.acquire(sc);
   9325 		if (rv)
   9326 			return;
   9327 
   9328 		if (sc->sc_type == WM_T_PCH2)
   9329 			emi_addr = I82579_RX_CONFIG;
   9330 		else
   9331 			emi_addr = I217_RX_CONFIG;
   9332 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9333 
   9334 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9335 			uint16_t phy_reg;
   9336 
   9337 			sc->phy.readreg_locked(dev, 2,
   9338 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9339 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9340 			if (speed == STATUS_SPEED_100
   9341 			    || speed == STATUS_SPEED_10)
   9342 				phy_reg |= 0x3e8;
   9343 			else
   9344 				phy_reg |= 0xfa;
   9345 			sc->phy.writereg_locked(dev, 2,
   9346 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9347 
   9348 			if (speed == STATUS_SPEED_1000) {
   9349 				sc->phy.readreg_locked(dev, 2,
   9350 				    HV_PM_CTRL, &phy_reg);
   9351 
   9352 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9353 
   9354 				sc->phy.writereg_locked(dev, 2,
   9355 				    HV_PM_CTRL, phy_reg);
   9356 			}
   9357 		}
   9358 		sc->phy.release(sc);
   9359 
   9360 		if (rv)
   9361 			return;
   9362 
   9363 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9364 			uint16_t data, ptr_gap;
   9365 
   9366 			if (speed == STATUS_SPEED_1000) {
   9367 				rv = sc->phy.acquire(sc);
   9368 				if (rv)
   9369 					return;
   9370 
   9371 				rv = sc->phy.readreg_locked(dev, 2,
   9372 				    I82579_UNKNOWN1, &data);
   9373 				if (rv) {
   9374 					sc->phy.release(sc);
   9375 					return;
   9376 				}
   9377 
   9378 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9379 				if (ptr_gap < 0x18) {
   9380 					data &= ~(0x3ff << 2);
   9381 					data |= (0x18 << 2);
   9382 					rv = sc->phy.writereg_locked(dev,
   9383 					    2, I82579_UNKNOWN1, data);
   9384 				}
   9385 				sc->phy.release(sc);
   9386 				if (rv)
   9387 					return;
   9388 			} else {
   9389 				rv = sc->phy.acquire(sc);
   9390 				if (rv)
   9391 					return;
   9392 
   9393 				rv = sc->phy.writereg_locked(dev, 2,
   9394 				    I82579_UNKNOWN1, 0xc023);
   9395 				sc->phy.release(sc);
   9396 				if (rv)
   9397 					return;
   9398 
   9399 			}
   9400 		}
   9401 	}
   9402 
   9403 	/*
   9404 	 * I217 Packet Loss issue:
    9405 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9406 	 * on power up.
    9407 	 * Set the Beacon Duration for I217 to 8 usec.
   9408 	 */
   9409 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9410 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9411 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9412 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9413 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9414 	}
   9415 
   9416 	/* Work-around I218 hang issue */
   9417 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9418 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9419 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9420 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9421 		wm_k1_workaround_lpt_lp(sc, link);
   9422 
   9423 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9424 		/*
   9425 		 * Set platform power management values for Latency
   9426 		 * Tolerance Reporting (LTR)
   9427 		 */
   9428 		wm_platform_pm_pch_lpt(sc,
   9429 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9430 	}
   9431 
   9432 	/* Clear link partner's EEE ability */
   9433 	sc->eee_lp_ability = 0;
   9434 
   9435 	/* FEXTNVM6 K1-off workaround */
   9436 	if (sc->sc_type == WM_T_PCH_SPT) {
   9437 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9438 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9439 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9440 		else
   9441 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9442 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9443 	}
   9444 
   9445 	if (!link)
   9446 		return;
   9447 
   9448 	switch (sc->sc_type) {
   9449 	case WM_T_PCH2:
   9450 		wm_k1_workaround_lv(sc);
   9451 		/* FALLTHROUGH */
   9452 	case WM_T_PCH:
   9453 		if (sc->sc_phytype == WMPHY_82578)
   9454 			wm_link_stall_workaround_hv(sc);
   9455 		break;
   9456 	default:
   9457 		break;
   9458 	}
   9459 
   9460 	/* Enable/Disable EEE after link up */
   9461 	if (sc->sc_phytype > WMPHY_82579)
   9462 		wm_set_eee_pchlan(sc);
   9463 }
   9464 
   9465 /*
   9466  * wm_linkintr_tbi:
   9467  *
   9468  *	Helper; handle link interrupts for TBI mode.
   9469  */
   9470 static void
   9471 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9472 {
   9473 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9474 	uint32_t status;
   9475 
   9476 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9477 		__func__));
   9478 
   9479 	status = CSR_READ(sc, WMREG_STATUS);
   9480 	if (icr & ICR_LSC) {
   9481 		wm_check_for_link(sc);
   9482 		if (status & STATUS_LU) {
   9483 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9484 				device_xname(sc->sc_dev),
   9485 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9486 			/*
   9487 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9488 			 * so we should update sc->sc_ctrl
   9489 			 */
   9490 
   9491 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9492 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9493 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9494 			if (status & STATUS_FD)
   9495 				sc->sc_tctl |=
   9496 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9497 			else
   9498 				sc->sc_tctl |=
   9499 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9500 			if (sc->sc_ctrl & CTRL_TFCE)
   9501 				sc->sc_fcrtl |= FCRTL_XONE;
   9502 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9503 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9504 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9505 			sc->sc_tbi_linkup = 1;
   9506 			if_link_state_change(ifp, LINK_STATE_UP);
   9507 		} else {
   9508 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9509 				device_xname(sc->sc_dev)));
   9510 			sc->sc_tbi_linkup = 0;
   9511 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9512 		}
   9513 		/* Update LED */
   9514 		wm_tbi_serdes_set_linkled(sc);
   9515 	} else if (icr & ICR_RXSEQ)
   9516 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9517 			device_xname(sc->sc_dev)));
   9518 }
   9519 
   9520 /*
   9521  * wm_linkintr_serdes:
   9522  *
    9523  *	Helper; handle link interrupts for SERDES mode.
   9524  */
   9525 static void
   9526 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9527 {
   9528 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9529 	struct mii_data *mii = &sc->sc_mii;
   9530 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9531 	uint32_t pcs_adv, pcs_lpab, reg;
   9532 
   9533 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9534 		__func__));
   9535 
   9536 	if (icr & ICR_LSC) {
   9537 		/* Check PCS */
   9538 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9539 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9540 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9541 				device_xname(sc->sc_dev)));
   9542 			mii->mii_media_status |= IFM_ACTIVE;
   9543 			sc->sc_tbi_linkup = 1;
   9544 			if_link_state_change(ifp, LINK_STATE_UP);
   9545 		} else {
   9546 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9547 				device_xname(sc->sc_dev)));
   9548 			mii->mii_media_status |= IFM_NONE;
   9549 			sc->sc_tbi_linkup = 0;
   9550 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9551 			wm_tbi_serdes_set_linkled(sc);
   9552 			return;
   9553 		}
   9554 		mii->mii_media_active |= IFM_1000_SX;
   9555 		if ((reg & PCS_LSTS_FDX) != 0)
   9556 			mii->mii_media_active |= IFM_FDX;
   9557 		else
   9558 			mii->mii_media_active |= IFM_HDX;
   9559 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9560 			/* Check flow */
   9561 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9562 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9563 				DPRINTF(sc, WM_DEBUG_LINK,
   9564 				    ("XXX LINKOK but not ACOMP\n"));
   9565 				return;
   9566 			}
   9567 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9568 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9569 			DPRINTF(sc, WM_DEBUG_LINK,
   9570 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9571 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9572 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9573 				mii->mii_media_active |= IFM_FLOW
   9574 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9575 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9576 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9577 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9578 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9579 				mii->mii_media_active |= IFM_FLOW
   9580 				    | IFM_ETH_TXPAUSE;
   9581 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9582 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9583 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9584 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9585 				mii->mii_media_active |= IFM_FLOW
   9586 				    | IFM_ETH_RXPAUSE;
   9587 		}
   9588 		/* Update LED */
   9589 		wm_tbi_serdes_set_linkled(sc);
   9590 	} else
   9591 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9592 		    device_xname(sc->sc_dev)));
   9593 }
   9594 
   9595 /*
   9596  * wm_linkintr:
   9597  *
   9598  *	Helper; handle link interrupts.
   9599  */
   9600 static void
   9601 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9602 {
   9603 
   9604 	KASSERT(WM_CORE_LOCKED(sc));
   9605 
   9606 	if (sc->sc_flags & WM_F_HAS_MII)
   9607 		wm_linkintr_gmii(sc, icr);
   9608 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9609 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9610 		wm_linkintr_serdes(sc, icr);
   9611 	else
   9612 		wm_linkintr_tbi(sc, icr);
   9613 }
   9614 
   9615 
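/*
 * wm_sched_handle_queue:
 *
 *	Schedule deferred Tx/Rx processing for a queue on either the
 *	shared workqueue or a softint, depending on the current setting.
 */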
   9616 static inline void
   9617 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9618 {
   9619 
   9620 	if (wmq->wmq_txrx_use_workqueue)
   9621 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9622 	else
   9623 		softint_schedule(wmq->wmq_si);
   9624 }
   9625 
   9626 /*
   9627  * wm_intr_legacy:
   9628  *
   9629  *	Interrupt service routine for INTx and MSI.
   9630  */
   9631 static int
   9632 wm_intr_legacy(void *arg)
   9633 {
   9634 	struct wm_softc *sc = arg;
   9635 	struct wm_queue *wmq = &sc->sc_queue[0];
   9636 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9637 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9638 	uint32_t icr, rndval = 0;
   9639 	int handled = 0;
   9640 
   9641 	while (1 /* CONSTCOND */) {
   9642 		icr = CSR_READ(sc, WMREG_ICR);
   9643 		if ((icr & sc->sc_icr) == 0)
   9644 			break;
   9645 		if (handled == 0)
   9646 			DPRINTF(sc, WM_DEBUG_TX,
   9647 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9648 		if (rndval == 0)
   9649 			rndval = icr;
   9650 
   9651 		mutex_enter(rxq->rxq_lock);
   9652 
   9653 		if (rxq->rxq_stopping) {
   9654 			mutex_exit(rxq->rxq_lock);
   9655 			break;
   9656 		}
   9657 
   9658 		handled = 1;
   9659 
   9660 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9661 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9662 			DPRINTF(sc, WM_DEBUG_RX,
   9663 			    ("%s: RX: got Rx intr 0x%08x\n",
   9664 				device_xname(sc->sc_dev),
   9665 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9666 			WM_Q_EVCNT_INCR(rxq, intr);
   9667 		}
   9668 #endif
   9669 		/*
   9670 		 * wm_rxeof() does *not* call upper layer functions directly,
    9671 		 * since if_percpuq_enqueue() just calls softint_schedule().
    9672 		 * So we can call wm_rxeof() in interrupt context.
   9673 		 */
   9674 		wm_rxeof(rxq, UINT_MAX);
   9675 
   9676 		mutex_exit(rxq->rxq_lock);
   9677 		mutex_enter(txq->txq_lock);
   9678 
   9679 		if (txq->txq_stopping) {
   9680 			mutex_exit(txq->txq_lock);
   9681 			break;
   9682 		}
   9683 
   9684 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9685 		if (icr & ICR_TXDW) {
   9686 			DPRINTF(sc, WM_DEBUG_TX,
   9687 			    ("%s: TX: got TXDW interrupt\n",
   9688 				device_xname(sc->sc_dev)));
   9689 			WM_Q_EVCNT_INCR(txq, txdw);
   9690 		}
   9691 #endif
   9692 		wm_txeof(txq, UINT_MAX);
   9693 
   9694 		mutex_exit(txq->txq_lock);
   9695 		WM_CORE_LOCK(sc);
   9696 
   9697 		if (sc->sc_core_stopping) {
   9698 			WM_CORE_UNLOCK(sc);
   9699 			break;
   9700 		}
   9701 
   9702 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9703 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9704 			wm_linkintr(sc, icr);
   9705 		}
   9706 		if ((icr & ICR_GPI(0)) != 0)
   9707 			device_printf(sc->sc_dev, "got module interrupt\n");
   9708 
   9709 		WM_CORE_UNLOCK(sc);
   9710 
   9711 		if (icr & ICR_RXO) {
   9712 #if defined(WM_DEBUG)
   9713 			log(LOG_WARNING, "%s: Receive overrun\n",
   9714 			    device_xname(sc->sc_dev));
   9715 #endif /* defined(WM_DEBUG) */
   9716 		}
   9717 	}
   9718 
   9719 	rnd_add_uint32(&sc->rnd_source, rndval);
   9720 
   9721 	if (handled) {
   9722 		/* Try to get more packets going. */
   9723 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9724 		wm_sched_handle_queue(sc, wmq);
   9725 	}
   9726 
   9727 	return handled;
   9728 }
   9729 
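/*
 * wm_txrxintr_disable:
 *
 *	Disable the Tx/Rx interrupts of a queue. The register used
 *	depends on the MAC type: IMC on 82574, EIMC with per-queue bits
 *	on 82575 and EIMC with the MSI-X vector bit on the others.
 */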
   9730 static inline void
   9731 wm_txrxintr_disable(struct wm_queue *wmq)
   9732 {
   9733 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9734 
   9735 	if (sc->sc_type == WM_T_82574)
   9736 		CSR_WRITE(sc, WMREG_IMC,
   9737 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9738 	else if (sc->sc_type == WM_T_82575)
   9739 		CSR_WRITE(sc, WMREG_EIMC,
   9740 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9741 	else
   9742 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9743 }
   9744 
   9745 static inline void
   9746 wm_txrxintr_enable(struct wm_queue *wmq)
   9747 {
   9748 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9749 
   9750 	wm_itrs_calculate(sc, wmq);
   9751 
   9752 	/*
    9753 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9754 	 * There is no need to care which of RXQ(0) and RXQ(1) enables
    9755 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9756 	 * while its wm_handle_queue(wmq) is running.
   9757 	 */
   9758 	if (sc->sc_type == WM_T_82574)
   9759 		CSR_WRITE(sc, WMREG_IMS,
   9760 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9761 	else if (sc->sc_type == WM_T_82575)
   9762 		CSR_WRITE(sc, WMREG_EIMS,
   9763 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9764 	else
   9765 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9766 }
   9767 
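/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx MSI-X vector of a queue.
 *	Process Tx and Rx up to the interrupt limits and defer the rest
 *	to wm_handle_queue().
 */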
   9768 static int
   9769 wm_txrxintr_msix(void *arg)
   9770 {
   9771 	struct wm_queue *wmq = arg;
   9772 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9773 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9774 	struct wm_softc *sc = txq->txq_sc;
   9775 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9776 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9777 	bool txmore;
   9778 	bool rxmore;
   9779 
   9780 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9781 
   9782 	DPRINTF(sc, WM_DEBUG_TX,
   9783 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9784 
   9785 	wm_txrxintr_disable(wmq);
   9786 
   9787 	mutex_enter(txq->txq_lock);
   9788 
   9789 	if (txq->txq_stopping) {
   9790 		mutex_exit(txq->txq_lock);
   9791 		return 0;
   9792 	}
   9793 
   9794 	WM_Q_EVCNT_INCR(txq, txdw);
   9795 	txmore = wm_txeof(txq, txlimit);
   9796 	/* wm_deferred start() is done in wm_handle_queue(). */
   9797 	mutex_exit(txq->txq_lock);
   9798 
   9799 	DPRINTF(sc, WM_DEBUG_RX,
   9800 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9801 	mutex_enter(rxq->rxq_lock);
   9802 
   9803 	if (rxq->rxq_stopping) {
   9804 		mutex_exit(rxq->rxq_lock);
   9805 		return 0;
   9806 	}
   9807 
   9808 	WM_Q_EVCNT_INCR(rxq, intr);
   9809 	rxmore = wm_rxeof(rxq, rxlimit);
   9810 	mutex_exit(rxq->rxq_lock);
   9811 
   9812 	wm_itrs_writereg(sc, wmq);
   9813 
   9814 	if (txmore || rxmore) {
   9815 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9816 		wm_sched_handle_queue(sc, wmq);
   9817 	} else
   9818 		wm_txrxintr_enable(wmq);
   9819 
   9820 	return 1;
   9821 }
   9822 
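/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler continuing the Tx/Rx processing that
 *	exceeded the per-interrupt limits in wm_txrxintr_msix() or
 *	wm_intr_legacy().
 */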
   9823 static void
   9824 wm_handle_queue(void *arg)
   9825 {
   9826 	struct wm_queue *wmq = arg;
   9827 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9828 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9829 	struct wm_softc *sc = txq->txq_sc;
   9830 	u_int txlimit = sc->sc_tx_process_limit;
   9831 	u_int rxlimit = sc->sc_rx_process_limit;
   9832 	bool txmore;
   9833 	bool rxmore;
   9834 
   9835 	mutex_enter(txq->txq_lock);
   9836 	if (txq->txq_stopping) {
   9837 		mutex_exit(txq->txq_lock);
   9838 		return;
   9839 	}
   9840 	txmore = wm_txeof(txq, txlimit);
   9841 	wm_deferred_start_locked(txq);
   9842 	mutex_exit(txq->txq_lock);
   9843 
   9844 	mutex_enter(rxq->rxq_lock);
   9845 	if (rxq->rxq_stopping) {
   9846 		mutex_exit(rxq->rxq_lock);
   9847 		return;
   9848 	}
   9849 	WM_Q_EVCNT_INCR(rxq, defer);
   9850 	rxmore = wm_rxeof(rxq, rxlimit);
   9851 	mutex_exit(rxq->rxq_lock);
   9852 
   9853 	if (txmore || rxmore) {
   9854 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9855 		wm_sched_handle_queue(sc, wmq);
   9856 	} else
   9857 		wm_txrxintr_enable(wmq);
   9858 }
   9859 
   9860 static void
   9861 wm_handle_queue_work(struct work *wk, void *context)
   9862 {
   9863 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9864 
    9865 	/* An "enqueued" flag is not required here. */
   9868 	wm_handle_queue(wmq);
   9869 }
   9870 
   9871 /*
   9872  * wm_linkintr_msix:
   9873  *
   9874  *	Interrupt service routine for link status change for MSI-X.
   9875  */
   9876 static int
   9877 wm_linkintr_msix(void *arg)
   9878 {
   9879 	struct wm_softc *sc = arg;
   9880 	uint32_t reg;
    9881 	bool has_rxo = false;	/* Read below even when no RXO occurred */
   9882 
   9883 	reg = CSR_READ(sc, WMREG_ICR);
   9884 	WM_CORE_LOCK(sc);
   9885 	DPRINTF(sc, WM_DEBUG_LINK,
   9886 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9887 		device_xname(sc->sc_dev), reg));
   9888 
   9889 	if (sc->sc_core_stopping)
   9890 		goto out;
   9891 
   9892 	if ((reg & ICR_LSC) != 0) {
   9893 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9894 		wm_linkintr(sc, ICR_LSC);
   9895 	}
   9896 	if ((reg & ICR_GPI(0)) != 0)
   9897 		device_printf(sc->sc_dev, "got module interrupt\n");
   9898 
   9899 	/*
   9900 	 * XXX 82574 MSI-X mode workaround
   9901 	 *
    9902 	 * In MSI-X mode, the 82574 delivers the receive overrun (RXO)
    9903 	 * interrupt on the ICR_OTHER MSI-X vector and raises neither the
    9904 	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector. So we generate ICR_RXQ(0)
    9905 	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS to process Rx packets.
   9906 	 */
   9907 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9908 #if defined(WM_DEBUG)
   9909 		log(LOG_WARNING, "%s: Receive overrun\n",
   9910 		    device_xname(sc->sc_dev));
   9911 #endif /* defined(WM_DEBUG) */
   9912 
   9913 		has_rxo = true;
   9914 		/*
    9915 		 * The RXO interrupt fires at a very high rate when receive
    9916 		 * traffic is heavy, so use polling mode for ICR_OTHER as is
    9917 		 * done for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
    9918 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    9919 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9920 		 */
   9921 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9922 
   9923 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9924 	}
   9925 
   9928 out:
   9929 	WM_CORE_UNLOCK(sc);
   9930 
   9931 	if (sc->sc_type == WM_T_82574) {
   9932 		if (!has_rxo)
   9933 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9934 		else
   9935 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9936 	} else if (sc->sc_type == WM_T_82575)
   9937 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9938 	else
   9939 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9940 
   9941 	return 1;
   9942 }
   9943 
   9944 /*
   9945  * Media related.
   9946  * GMII, SGMII, TBI (and SERDES)
   9947  */
   9948 
   9949 /* Common */
   9950 
   9951 /*
   9952  * wm_tbi_serdes_set_linkled:
   9953  *
   9954  *	Update the link LED on TBI and SERDES devices.
   9955  */
   9956 static void
   9957 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9958 {
   9959 
   9960 	if (sc->sc_tbi_linkup)
   9961 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9962 	else
   9963 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9964 
   9965 	/* 82540 or newer devices are active low */
   9966 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9967 
   9968 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9969 }
   9970 
   9971 /* GMII related */
   9972 
   9973 /*
   9974  * wm_gmii_reset:
   9975  *
   9976  *	Reset the PHY.
   9977  */
   9978 static void
   9979 wm_gmii_reset(struct wm_softc *sc)
   9980 {
   9981 	uint32_t reg;
   9982 	int rv;
   9983 
   9984 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   9985 		device_xname(sc->sc_dev), __func__));
   9986 
   9987 	rv = sc->phy.acquire(sc);
   9988 	if (rv != 0) {
   9989 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9990 		    __func__);
   9991 		return;
   9992 	}
   9993 
   9994 	switch (sc->sc_type) {
   9995 	case WM_T_82542_2_0:
   9996 	case WM_T_82542_2_1:
   9997 		/* null */
   9998 		break;
   9999 	case WM_T_82543:
   10000 		/*
   10001 		 * With 82543, we need to force speed and duplex on the MAC
   10002 		 * equal to what the PHY speed and duplex configuration is.
   10003 		 * In addition, we need to perform a hardware reset on the PHY
   10004 		 * to take it out of reset.
   10005 		 */
   10006 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10007 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10008 
   10009 		/* The PHY reset pin is active-low. */
   10010 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10011 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10012 		    CTRL_EXT_SWDPIN(4));
   10013 		reg |= CTRL_EXT_SWDPIO(4);
   10014 
   10015 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10016 		CSR_WRITE_FLUSH(sc);
   10017 		delay(10*1000);
   10018 
   10019 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10020 		CSR_WRITE_FLUSH(sc);
   10021 		delay(150);
   10022 #if 0
   10023 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10024 #endif
   10025 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10026 		break;
   10027 	case WM_T_82544:	/* Reset 10000us */
   10028 	case WM_T_82540:
   10029 	case WM_T_82545:
   10030 	case WM_T_82545_3:
   10031 	case WM_T_82546:
   10032 	case WM_T_82546_3:
   10033 	case WM_T_82541:
   10034 	case WM_T_82541_2:
   10035 	case WM_T_82547:
   10036 	case WM_T_82547_2:
   10037 	case WM_T_82571:	/* Reset 100us */
   10038 	case WM_T_82572:
   10039 	case WM_T_82573:
   10040 	case WM_T_82574:
   10041 	case WM_T_82575:
   10042 	case WM_T_82576:
   10043 	case WM_T_82580:
   10044 	case WM_T_I350:
   10045 	case WM_T_I354:
   10046 	case WM_T_I210:
   10047 	case WM_T_I211:
   10048 	case WM_T_82583:
   10049 	case WM_T_80003:
   10050 		/* Generic reset */
   10051 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10052 		CSR_WRITE_FLUSH(sc);
   10053 		delay(20000);
   10054 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10055 		CSR_WRITE_FLUSH(sc);
   10056 		delay(20000);
   10057 
   10058 		if ((sc->sc_type == WM_T_82541)
   10059 		    || (sc->sc_type == WM_T_82541_2)
   10060 		    || (sc->sc_type == WM_T_82547)
   10061 		    || (sc->sc_type == WM_T_82547_2)) {
    10062 			/* Workarounds for IGP are done in igp_reset() */
   10063 			/* XXX add code to set LED after phy reset */
   10064 		}
   10065 		break;
   10066 	case WM_T_ICH8:
   10067 	case WM_T_ICH9:
   10068 	case WM_T_ICH10:
   10069 	case WM_T_PCH:
   10070 	case WM_T_PCH2:
   10071 	case WM_T_PCH_LPT:
   10072 	case WM_T_PCH_SPT:
   10073 	case WM_T_PCH_CNP:
   10074 		/* Generic reset */
   10075 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10076 		CSR_WRITE_FLUSH(sc);
   10077 		delay(100);
   10078 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10079 		CSR_WRITE_FLUSH(sc);
   10080 		delay(150);
   10081 		break;
   10082 	default:
   10083 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10084 		    __func__);
   10085 		break;
   10086 	}
   10087 
   10088 	sc->phy.release(sc);
   10089 
   10090 	/* get_cfg_done */
   10091 	wm_get_cfg_done(sc);
   10092 
   10093 	/* Extra setup */
   10094 	switch (sc->sc_type) {
   10095 	case WM_T_82542_2_0:
   10096 	case WM_T_82542_2_1:
   10097 	case WM_T_82543:
   10098 	case WM_T_82544:
   10099 	case WM_T_82540:
   10100 	case WM_T_82545:
   10101 	case WM_T_82545_3:
   10102 	case WM_T_82546:
   10103 	case WM_T_82546_3:
   10104 	case WM_T_82541_2:
   10105 	case WM_T_82547_2:
   10106 	case WM_T_82571:
   10107 	case WM_T_82572:
   10108 	case WM_T_82573:
   10109 	case WM_T_82574:
   10110 	case WM_T_82583:
   10111 	case WM_T_82575:
   10112 	case WM_T_82576:
   10113 	case WM_T_82580:
   10114 	case WM_T_I350:
   10115 	case WM_T_I354:
   10116 	case WM_T_I210:
   10117 	case WM_T_I211:
   10118 	case WM_T_80003:
   10119 		/* Null */
   10120 		break;
   10121 	case WM_T_82541:
   10122 	case WM_T_82547:
    10123 		/* XXX Actively configure the LED after PHY reset */
   10124 		break;
   10125 	case WM_T_ICH8:
   10126 	case WM_T_ICH9:
   10127 	case WM_T_ICH10:
   10128 	case WM_T_PCH:
   10129 	case WM_T_PCH2:
   10130 	case WM_T_PCH_LPT:
   10131 	case WM_T_PCH_SPT:
   10132 	case WM_T_PCH_CNP:
   10133 		wm_phy_post_reset(sc);
   10134 		break;
   10135 	default:
   10136 		panic("%s: unknown type\n", __func__);
   10137 		break;
   10138 	}
   10139 }
   10140 
   10141 /*
   10142  * Setup sc_phytype and mii_{read|write}reg.
   10143  *
    10144  *  To identify the PHY type, the correct read/write functions must be
    10145  * selected, and to select them we need the PCI ID or the MAC type
    10146  * without accessing any PHY registers.
    10147  *
    10148  *  On the first call of this function, the PHY ID is not known yet, so
    10149  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    10150  * complete, so the result might be incorrect.
    10151  *
    10152  *  On the second call, the PHY OUI and model are used to identify the
    10153  * PHY type. The table may lack some entries, but the result should be
    10154  * better than that of the first call.
    10155  *
    10156  *  If the newly detected result differs from the previous assumption,
    10157  * a diagnostic message is printed.
   10158  */
   10159 static void
   10160 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10161     uint16_t phy_model)
   10162 {
   10163 	device_t dev = sc->sc_dev;
   10164 	struct mii_data *mii = &sc->sc_mii;
   10165 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10166 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10167 	mii_readreg_t new_readreg;
   10168 	mii_writereg_t new_writereg;
   10169 	bool dodiag = true;
   10170 
   10171 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10172 		device_xname(sc->sc_dev), __func__));
   10173 
   10174 	/*
    10175 	 * 1000BASE-T SFP uses SGMII and the PHY type assumed on the first
    10176 	 * call is always incorrect, so don't print diag output on the 2nd call.
   10177 	 */
   10178 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10179 		dodiag = false;
   10180 
   10181 	if (mii->mii_readreg == NULL) {
   10182 		/*
   10183 		 *  This is the first call of this function. For ICH and PCH
   10184 		 * variants, it's difficult to determine the PHY access method
   10185 		 * by sc_type, so use the PCI product ID for some devices.
   10186 		 */
   10187 
   10188 		switch (sc->sc_pcidevid) {
   10189 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10190 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10191 			/* 82577 */
   10192 			new_phytype = WMPHY_82577;
   10193 			break;
   10194 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10195 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10196 			/* 82578 */
   10197 			new_phytype = WMPHY_82578;
   10198 			break;
   10199 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10200 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10201 			/* 82579 */
   10202 			new_phytype = WMPHY_82579;
   10203 			break;
   10204 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10205 		case PCI_PRODUCT_INTEL_82801I_BM:
   10206 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10207 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10208 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10209 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10210 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10211 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10212 			/* ICH8, 9, 10 with 82567 */
   10213 			new_phytype = WMPHY_BM;
   10214 			break;
   10215 		default:
   10216 			break;
   10217 		}
   10218 	} else {
   10219 		/* It's not the first call. Use PHY OUI and model */
   10220 		switch (phy_oui) {
   10221 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10222 			switch (phy_model) {
   10223 			case 0x0004: /* XXX */
   10224 				new_phytype = WMPHY_82578;
   10225 				break;
   10226 			default:
   10227 				break;
   10228 			}
   10229 			break;
   10230 		case MII_OUI_xxMARVELL:
   10231 			switch (phy_model) {
   10232 			case MII_MODEL_xxMARVELL_I210:
   10233 				new_phytype = WMPHY_I210;
   10234 				break;
   10235 			case MII_MODEL_xxMARVELL_E1011:
   10236 			case MII_MODEL_xxMARVELL_E1000_3:
   10237 			case MII_MODEL_xxMARVELL_E1000_5:
   10238 			case MII_MODEL_xxMARVELL_E1112:
   10239 				new_phytype = WMPHY_M88;
   10240 				break;
   10241 			case MII_MODEL_xxMARVELL_E1149:
   10242 				new_phytype = WMPHY_BM;
   10243 				break;
   10244 			case MII_MODEL_xxMARVELL_E1111:
   10245 			case MII_MODEL_xxMARVELL_I347:
   10246 			case MII_MODEL_xxMARVELL_E1512:
   10247 			case MII_MODEL_xxMARVELL_E1340M:
   10248 			case MII_MODEL_xxMARVELL_E1543:
   10249 				new_phytype = WMPHY_M88;
   10250 				break;
   10251 			case MII_MODEL_xxMARVELL_I82563:
   10252 				new_phytype = WMPHY_GG82563;
   10253 				break;
   10254 			default:
   10255 				break;
   10256 			}
   10257 			break;
   10258 		case MII_OUI_INTEL:
   10259 			switch (phy_model) {
   10260 			case MII_MODEL_INTEL_I82577:
   10261 				new_phytype = WMPHY_82577;
   10262 				break;
   10263 			case MII_MODEL_INTEL_I82579:
   10264 				new_phytype = WMPHY_82579;
   10265 				break;
   10266 			case MII_MODEL_INTEL_I217:
   10267 				new_phytype = WMPHY_I217;
   10268 				break;
   10269 			case MII_MODEL_INTEL_I82580:
   10270 			case MII_MODEL_INTEL_I350:
   10271 				new_phytype = WMPHY_82580;
   10272 				break;
   10273 			default:
   10274 				break;
   10275 			}
   10276 			break;
   10277 		case MII_OUI_yyINTEL:
   10278 			switch (phy_model) {
   10279 			case MII_MODEL_yyINTEL_I82562G:
   10280 			case MII_MODEL_yyINTEL_I82562EM:
   10281 			case MII_MODEL_yyINTEL_I82562ET:
   10282 				new_phytype = WMPHY_IFE;
   10283 				break;
   10284 			case MII_MODEL_yyINTEL_IGP01E1000:
   10285 				new_phytype = WMPHY_IGP;
   10286 				break;
   10287 			case MII_MODEL_yyINTEL_I82566:
   10288 				new_phytype = WMPHY_IGP_3;
   10289 				break;
   10290 			default:
   10291 				break;
   10292 			}
   10293 			break;
   10294 		default:
   10295 			break;
   10296 		}
   10297 
   10298 		if (dodiag) {
   10299 			if (new_phytype == WMPHY_UNKNOWN)
   10300 				aprint_verbose_dev(dev,
   10301 				    "%s: Unknown PHY model. OUI=%06x, "
   10302 				    "model=%04x\n", __func__, phy_oui,
   10303 				    phy_model);
   10304 
   10305 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10306 			    && (sc->sc_phytype != new_phytype)) {
   10307 				aprint_error_dev(dev, "Previously assumed PHY "
   10308 				    "type(%u) was incorrect. PHY type from PHY"
   10309 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10310 			}
   10311 		}
   10312 	}
   10313 
   10314 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10315 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10316 		/* SGMII */
   10317 		new_readreg = wm_sgmii_readreg;
   10318 		new_writereg = wm_sgmii_writereg;
    10319 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10320 		/* BM2 (phyaddr == 1) */
   10321 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10322 		    && (new_phytype != WMPHY_BM)
   10323 		    && (new_phytype != WMPHY_UNKNOWN))
   10324 			doubt_phytype = new_phytype;
   10325 		new_phytype = WMPHY_BM;
   10326 		new_readreg = wm_gmii_bm_readreg;
   10327 		new_writereg = wm_gmii_bm_writereg;
   10328 	} else if (sc->sc_type >= WM_T_PCH) {
   10329 		/* All PCH* use _hv_ */
   10330 		new_readreg = wm_gmii_hv_readreg;
   10331 		new_writereg = wm_gmii_hv_writereg;
   10332 	} else if (sc->sc_type >= WM_T_ICH8) {
   10333 		/* non-82567 ICH8, 9 and 10 */
   10334 		new_readreg = wm_gmii_i82544_readreg;
   10335 		new_writereg = wm_gmii_i82544_writereg;
   10336 	} else if (sc->sc_type >= WM_T_80003) {
   10337 		/* 80003 */
   10338 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10339 		    && (new_phytype != WMPHY_GG82563)
   10340 		    && (new_phytype != WMPHY_UNKNOWN))
   10341 			doubt_phytype = new_phytype;
   10342 		new_phytype = WMPHY_GG82563;
   10343 		new_readreg = wm_gmii_i80003_readreg;
   10344 		new_writereg = wm_gmii_i80003_writereg;
   10345 	} else if (sc->sc_type >= WM_T_I210) {
   10346 		/* I210 and I211 */
   10347 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10348 		    && (new_phytype != WMPHY_I210)
   10349 		    && (new_phytype != WMPHY_UNKNOWN))
   10350 			doubt_phytype = new_phytype;
   10351 		new_phytype = WMPHY_I210;
   10352 		new_readreg = wm_gmii_gs40g_readreg;
   10353 		new_writereg = wm_gmii_gs40g_writereg;
   10354 	} else if (sc->sc_type >= WM_T_82580) {
   10355 		/* 82580, I350 and I354 */
   10356 		new_readreg = wm_gmii_82580_readreg;
   10357 		new_writereg = wm_gmii_82580_writereg;
   10358 	} else if (sc->sc_type >= WM_T_82544) {
    10359 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10360 		new_readreg = wm_gmii_i82544_readreg;
   10361 		new_writereg = wm_gmii_i82544_writereg;
   10362 	} else {
   10363 		new_readreg = wm_gmii_i82543_readreg;
   10364 		new_writereg = wm_gmii_i82543_writereg;
   10365 	}
   10366 
   10367 	if (new_phytype == WMPHY_BM) {
   10368 		/* All BM use _bm_ */
   10369 		new_readreg = wm_gmii_bm_readreg;
   10370 		new_writereg = wm_gmii_bm_writereg;
   10371 	}
   10372 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10373 		/* All PCH* use _hv_ */
   10374 		new_readreg = wm_gmii_hv_readreg;
   10375 		new_writereg = wm_gmii_hv_writereg;
   10376 	}
   10377 
   10378 	/* Diag output */
   10379 	if (dodiag) {
   10380 		if (doubt_phytype != WMPHY_UNKNOWN)
   10381 			aprint_error_dev(dev, "Assumed new PHY type was "
   10382 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10383 			    new_phytype);
   10384 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10385 		    && (sc->sc_phytype != new_phytype))
    10386 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    10387 			    "was incorrect. New PHY type = %u\n",
   10388 			    sc->sc_phytype, new_phytype);
   10389 
   10390 		if ((mii->mii_readreg != NULL) &&
   10391 		    (new_phytype == WMPHY_UNKNOWN))
   10392 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10393 
   10394 		if ((mii->mii_readreg != NULL) &&
   10395 		    (mii->mii_readreg != new_readreg))
   10396 			aprint_error_dev(dev, "Previously assumed PHY "
   10397 			    "read/write function was incorrect.\n");
   10398 	}
   10399 
   10400 	/* Update now */
   10401 	sc->sc_phytype = new_phytype;
   10402 	mii->mii_readreg = new_readreg;
   10403 	mii->mii_writereg = new_writereg;
   10404 	if (new_readreg == wm_gmii_hv_readreg) {
   10405 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10406 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10407 	} else if (new_readreg == wm_sgmii_readreg) {
   10408 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10409 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10410 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10411 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10412 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10413 	}
   10414 }
   10415 
   10416 /*
   10417  * wm_get_phy_id_82575:
   10418  *
    10419  * Return the PHY ID, or -1 if it failed.
   10420  */
   10421 static int
   10422 wm_get_phy_id_82575(struct wm_softc *sc)
   10423 {
   10424 	uint32_t reg;
   10425 	int phyid = -1;
   10426 
   10427 	/* XXX */
   10428 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10429 		return -1;
   10430 
   10431 	if (wm_sgmii_uses_mdio(sc)) {
   10432 		switch (sc->sc_type) {
   10433 		case WM_T_82575:
   10434 		case WM_T_82576:
   10435 			reg = CSR_READ(sc, WMREG_MDIC);
   10436 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10437 			break;
   10438 		case WM_T_82580:
   10439 		case WM_T_I350:
   10440 		case WM_T_I354:
   10441 		case WM_T_I210:
   10442 		case WM_T_I211:
   10443 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10444 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10445 			break;
   10446 		default:
   10447 			return -1;
   10448 		}
   10449 	}
   10450 
   10451 	return phyid;
   10452 }
   10453 
   10454 /*
   10455  * wm_gmii_mediainit:
   10456  *
   10457  *	Initialize media for use on 1000BASE-T devices.
   10458  */
   10459 static void
   10460 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10461 {
   10462 	device_t dev = sc->sc_dev;
   10463 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10464 	struct mii_data *mii = &sc->sc_mii;
   10465 
   10466 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10467 		device_xname(sc->sc_dev), __func__));
   10468 
   10469 	/* We have GMII. */
   10470 	sc->sc_flags |= WM_F_HAS_MII;
   10471 
   10472 	if (sc->sc_type == WM_T_80003)
    10473 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10474 	else
   10475 		sc->sc_tipg = TIPG_1000T_DFLT;
   10476 
   10477 	/*
   10478 	 * Let the chip set speed/duplex on its own based on
   10479 	 * signals from the PHY.
   10480 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10481 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10482 	 */
   10483 	sc->sc_ctrl |= CTRL_SLU;
   10484 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10485 
   10486 	/* Initialize our media structures and probe the GMII. */
   10487 	mii->mii_ifp = ifp;
   10488 
   10489 	mii->mii_statchg = wm_gmii_statchg;
   10490 
    10491 	/* Switch PHY control from SMBus to PCIe */
   10492 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10493 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10494 	    || (sc->sc_type == WM_T_PCH_CNP))
   10495 		wm_init_phy_workarounds_pchlan(sc);
   10496 
   10497 	wm_gmii_reset(sc);
   10498 
   10499 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10500 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10501 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10502 
   10503 	/* Setup internal SGMII PHY for SFP */
   10504 	wm_sgmii_sfp_preconfig(sc);
   10505 
   10506 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10507 	    || (sc->sc_type == WM_T_82580)
   10508 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10509 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10510 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10511 			/* Attach only one port */
   10512 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10513 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10514 		} else {
   10515 			int i, id;
   10516 			uint32_t ctrl_ext;
   10517 
   10518 			id = wm_get_phy_id_82575(sc);
   10519 			if (id != -1) {
   10520 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10521 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10522 			}
   10523 			if ((id == -1)
   10524 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10525 				/* Power on sgmii phy if it is disabled */
   10526 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10527 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10528 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10529 				CSR_WRITE_FLUSH(sc);
   10530 				delay(300*1000); /* XXX too long */
   10531 
   10532 				/*
    10533 				 * Scan PHY addresses 1 through 7.
    10534 				 *
    10535 				 * I2C access can fail with the I2C register's
    10536 				 * ERROR bit set, so suppress error messages
    10537 				 * while scanning.
   10538 				 */
   10539 				sc->phy.no_errprint = true;
   10540 				for (i = 1; i < 8; i++)
   10541 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10542 					    0xffffffff, i, MII_OFFSET_ANY,
   10543 					    MIIF_DOPAUSE);
   10544 				sc->phy.no_errprint = false;
   10545 
   10546 				/* Restore previous sfp cage power state */
   10547 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10548 			}
   10549 		}
   10550 	} else
   10551 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10552 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10553 
   10554 	/*
    10555 	 * If the MAC is PCH2 or newer and failed to detect the MII PHY, call
   10556 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10557 	 */
   10558 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10559 		|| (sc->sc_type == WM_T_PCH_SPT)
   10560 		|| (sc->sc_type == WM_T_PCH_CNP))
   10561 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10562 		wm_set_mdio_slow_mode_hv(sc);
   10563 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10564 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10565 	}
   10566 
   10567 	/*
   10568 	 * (For ICH8 variants)
   10569 	 * If PHY detection failed, use BM's r/w function and retry.
   10570 	 */
   10571 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10572 		/* if failed, retry with *_bm_* */
   10573 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10574 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10575 		    sc->sc_phytype);
   10576 		sc->sc_phytype = WMPHY_BM;
   10577 		mii->mii_readreg = wm_gmii_bm_readreg;
   10578 		mii->mii_writereg = wm_gmii_bm_writereg;
   10579 
   10580 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10581 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10582 	}
   10583 
   10584 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10585 		/* No PHY was found */
   10586 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10587 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10588 		sc->sc_phytype = WMPHY_NONE;
   10589 	} else {
   10590 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10591 
   10592 		/*
    10593 		 * PHY found! Check the PHY type again with the second call of
   10594 		 * wm_gmii_setup_phytype.
   10595 		 */
   10596 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10597 		    child->mii_mpd_model);
   10598 
   10599 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10600 	}
   10601 }
   10602 
   10603 /*
   10604  * wm_gmii_mediachange:	[ifmedia interface function]
   10605  *
   10606  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10607  */
   10608 static int
   10609 wm_gmii_mediachange(struct ifnet *ifp)
   10610 {
   10611 	struct wm_softc *sc = ifp->if_softc;
   10612 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10613 	uint32_t reg;
   10614 	int rc;
   10615 
   10616 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10617 		device_xname(sc->sc_dev), __func__));
   10618 	if ((ifp->if_flags & IFF_UP) == 0)
   10619 		return 0;
   10620 
   10621 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10622 	if ((sc->sc_type == WM_T_82580)
   10623 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10624 	    || (sc->sc_type == WM_T_I211)) {
   10625 		reg = CSR_READ(sc, WMREG_PHPM);
   10626 		reg &= ~PHPM_GO_LINK_D;
   10627 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10628 	}
   10629 
   10630 	/* Disable D0 LPLU. */
   10631 	wm_lplu_d0_disable(sc);
   10632 
   10633 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10634 	sc->sc_ctrl |= CTRL_SLU;
   10635 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10636 	    || (sc->sc_type > WM_T_82543)) {
   10637 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10638 	} else {
   10639 		sc->sc_ctrl &= ~CTRL_ASDE;
   10640 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10641 		if (ife->ifm_media & IFM_FDX)
   10642 			sc->sc_ctrl |= CTRL_FD;
   10643 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10644 		case IFM_10_T:
   10645 			sc->sc_ctrl |= CTRL_SPEED_10;
   10646 			break;
   10647 		case IFM_100_TX:
   10648 			sc->sc_ctrl |= CTRL_SPEED_100;
   10649 			break;
   10650 		case IFM_1000_T:
   10651 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10652 			break;
   10653 		case IFM_NONE:
   10654 			/* There is no specific setting for IFM_NONE */
   10655 			break;
   10656 		default:
   10657 			panic("wm_gmii_mediachange: bad media 0x%x",
   10658 			    ife->ifm_media);
   10659 		}
   10660 	}
   10661 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10662 	CSR_WRITE_FLUSH(sc);
   10663 
   10664 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10665 		wm_serdes_mediachange(ifp);
   10666 
   10667 	if (sc->sc_type <= WM_T_82543)
   10668 		wm_gmii_reset(sc);
   10669 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10670 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10671 		/* Allow time for the SFP cage to power up the PHY */
   10672 		delay(300 * 1000);
   10673 		wm_gmii_reset(sc);
   10674 	}
   10675 
   10676 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10677 		return 0;
   10678 	return rc;
   10679 }
   10680 
   10681 /*
   10682  * wm_gmii_mediastatus:	[ifmedia interface function]
   10683  *
   10684  *	Get the current interface media status on a 1000BASE-T device.
   10685  */
   10686 static void
   10687 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10688 {
   10689 	struct wm_softc *sc = ifp->if_softc;
   10690 
   10691 	ether_mediastatus(ifp, ifmr);
   10692 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10693 	    | sc->sc_flowflags;
   10694 }
   10695 
   10696 #define	MDI_IO		CTRL_SWDPIN(2)
   10697 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10698 #define	MDI_CLK		CTRL_SWDPIN(3)
   10699 
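/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock the low 'nbits' bits of 'data' out to the PHY, most
 *	significant bit first, over the bit-banged MDIO interface.
 */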
   10700 static void
   10701 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10702 {
   10703 	uint32_t i, v;
   10704 
   10705 	v = CSR_READ(sc, WMREG_CTRL);
   10706 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10707 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10708 
   10709 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10710 		if (data & i)
   10711 			v |= MDI_IO;
   10712 		else
   10713 			v &= ~MDI_IO;
   10714 		CSR_WRITE(sc, WMREG_CTRL, v);
   10715 		CSR_WRITE_FLUSH(sc);
   10716 		delay(10);
   10717 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10718 		CSR_WRITE_FLUSH(sc);
   10719 		delay(10);
   10720 		CSR_WRITE(sc, WMREG_CTRL, v);
   10721 		CSR_WRITE_FLUSH(sc);
   10722 		delay(10);
   10723 	}
   10724 }
   10725 
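/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock a 16-bit value in from the PHY over the bit-banged MDIO
 *	interface, with turnaround clocking before and after the data.
 */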
   10726 static uint16_t
   10727 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10728 {
   10729 	uint32_t v, i;
   10730 	uint16_t data = 0;
   10731 
   10732 	v = CSR_READ(sc, WMREG_CTRL);
   10733 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10734 	v |= CTRL_SWDPIO(3);
   10735 
   10736 	CSR_WRITE(sc, WMREG_CTRL, v);
   10737 	CSR_WRITE_FLUSH(sc);
   10738 	delay(10);
   10739 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10740 	CSR_WRITE_FLUSH(sc);
   10741 	delay(10);
   10742 	CSR_WRITE(sc, WMREG_CTRL, v);
   10743 	CSR_WRITE_FLUSH(sc);
   10744 	delay(10);
   10745 
   10746 	for (i = 0; i < 16; i++) {
   10747 		data <<= 1;
   10748 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10749 		CSR_WRITE_FLUSH(sc);
   10750 		delay(10);
   10751 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10752 			data |= 1;
   10753 		CSR_WRITE(sc, WMREG_CTRL, v);
   10754 		CSR_WRITE_FLUSH(sc);
   10755 		delay(10);
   10756 	}
   10757 
   10758 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10759 	CSR_WRITE_FLUSH(sc);
   10760 	delay(10);
   10761 	CSR_WRITE(sc, WMREG_CTRL, v);
   10762 	CSR_WRITE_FLUSH(sc);
   10763 	delay(10);
   10764 
   10765 	return data;
   10766 }
   10767 
   10768 #undef MDI_IO
   10769 #undef MDI_DIR
   10770 #undef MDI_CLK
   10771 
   10772 /*
   10773  * wm_gmii_i82543_readreg:	[mii interface function]
   10774  *
   10775  *	Read a PHY register on the GMII (i82543 version).
   10776  */
   10777 static int
   10778 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10779 {
   10780 	struct wm_softc *sc = device_private(dev);
   10781 
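	/*
	 * 32 bits of preamble, then the 14-bit read frame: start and
	 * read opcodes, PHY address and register address. The PHY then
	 * drives the turnaround and data bits, collected below.
	 */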
   10782 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10783 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10784 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10785 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10786 
   10787 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10788 		device_xname(dev), phy, reg, *val));
   10789 
   10790 	return 0;
   10791 }
   10792 
   10793 /*
   10794  * wm_gmii_i82543_writereg:	[mii interface function]
   10795  *
   10796  *	Write a PHY register on the GMII (i82543 version).
   10797  */
   10798 static int
   10799 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10800 {
   10801 	struct wm_softc *sc = device_private(dev);
   10802 
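	/*
	 * 32 bits of preamble, then the full 32-bit write frame: start
	 * and write opcodes, PHY address, register address, turnaround
	 * and the 16 data bits.
	 */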
   10803 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10804 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10805 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10806 	    (MII_COMMAND_START << 30), 32);
   10807 
   10808 	return 0;
   10809 }
   10810 
   10811 /*
   10812  * wm_gmii_mdic_readreg:	[mii interface function]
   10813  *
   10814  *	Read a PHY register on the GMII.
   10815  */
   10816 static int
   10817 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10818 {
   10819 	struct wm_softc *sc = device_private(dev);
   10820 	uint32_t mdic = 0;
   10821 	int i;
   10822 
   10823 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10824 	    && (reg > MII_ADDRMASK)) {
   10825 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10826 		    __func__, sc->sc_phytype, reg);
   10827 		reg &= MII_ADDRMASK;
   10828 	}
   10829 
   10830 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10831 	    MDIC_REGADD(reg));
   10832 
   10833 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10834 		delay(50);
   10835 		mdic = CSR_READ(sc, WMREG_MDIC);
   10836 		if (mdic & MDIC_READY)
   10837 			break;
   10838 	}
   10839 
   10840 	if ((mdic & MDIC_READY) == 0) {
   10841 		DPRINTF(sc, WM_DEBUG_GMII,
   10842 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10843 			device_xname(dev), phy, reg));
   10844 		return ETIMEDOUT;
   10845 	} else if (mdic & MDIC_E) {
   10846 		/* This is normal if no PHY is present. */
   10847 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10848 			device_xname(sc->sc_dev), phy, reg));
   10849 		return -1;
   10850 	} else
   10851 		*val = MDIC_DATA(mdic);
   10852 
   10853 	/*
   10854 	 * Allow some time after each MDIC transaction to avoid
   10855 	 * reading duplicate data in the next MDIC transaction.
   10856 	 */
   10857 	if (sc->sc_type == WM_T_PCH2)
   10858 		delay(100);
   10859 
   10860 	return 0;
   10861 }
   10862 
   10863 /*
   10864  * wm_gmii_mdic_writereg:	[mii interface function]
   10865  *
   10866  *	Write a PHY register on the GMII.
   10867  */
   10868 static int
   10869 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10870 {
   10871 	struct wm_softc *sc = device_private(dev);
   10872 	uint32_t mdic = 0;
   10873 	int i;
   10874 
   10875 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10876 	    && (reg > MII_ADDRMASK)) {
   10877 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10878 		    __func__, sc->sc_phytype, reg);
   10879 		reg &= MII_ADDRMASK;
   10880 	}
   10881 
   10882 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10883 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10884 
   10885 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10886 		delay(50);
   10887 		mdic = CSR_READ(sc, WMREG_MDIC);
   10888 		if (mdic & MDIC_READY)
   10889 			break;
   10890 	}
   10891 
   10892 	if ((mdic & MDIC_READY) == 0) {
   10893 		DPRINTF(sc, WM_DEBUG_GMII,
   10894 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10895 			device_xname(dev), phy, reg));
   10896 		return ETIMEDOUT;
   10897 	} else if (mdic & MDIC_E) {
   10898 		DPRINTF(sc, WM_DEBUG_GMII,
   10899 		    ("%s: MDIC write error: phy %d reg %d\n",
   10900 			device_xname(dev), phy, reg));
   10901 		return -1;
   10902 	}
   10903 
   10904 	/*
   10905 	 * Allow some time after each MDIC transaction to avoid
   10906 	 * reading duplicate data in the next MDIC transaction.
   10907 	 */
   10908 	if (sc->sc_type == WM_T_PCH2)
   10909 		delay(100);
   10910 
   10911 	return 0;
   10912 }
   10913 
   10914 /*
   10915  * wm_gmii_i82544_readreg:	[mii interface function]
   10916  *
   10917  *	Read a PHY register on the GMII.
   10918  */
   10919 static int
   10920 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10921 {
   10922 	struct wm_softc *sc = device_private(dev);
   10923 	int rv;
   10924 
   10925 	if (sc->phy.acquire(sc)) {
   10926 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10927 		return -1;
   10928 	}
   10929 
   10930 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10931 
   10932 	sc->phy.release(sc);
   10933 
   10934 	return rv;
   10935 }
   10936 
   10937 static int
   10938 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10939 {
   10940 	struct wm_softc *sc = device_private(dev);
   10941 	int rv;
   10942 
   10943 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10944 		switch (sc->sc_phytype) {
   10945 		case WMPHY_IGP:
   10946 		case WMPHY_IGP_2:
   10947 		case WMPHY_IGP_3:
   10948 			rv = wm_gmii_mdic_writereg(dev, phy,
   10949 			    IGPHY_PAGE_SELECT, reg);
   10950 			if (rv != 0)
   10951 				return rv;
   10952 			break;
   10953 		default:
   10954 #ifdef WM_DEBUG
   10955 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10956 			    __func__, sc->sc_phytype, reg);
   10957 #endif
   10958 			break;
   10959 		}
   10960 	}
   10961 
   10962 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10963 }
   10964 
   10965 /*
   10966  * wm_gmii_i82544_writereg:	[mii interface function]
   10967  *
   10968  *	Write a PHY register on the GMII.
   10969  */
   10970 static int
   10971 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10972 {
   10973 	struct wm_softc *sc = device_private(dev);
   10974 	int rv;
   10975 
   10976 	if (sc->phy.acquire(sc)) {
   10977 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10978 		return -1;
   10979 	}
   10980 
   10981 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10982 	sc->phy.release(sc);
   10983 
   10984 	return rv;
   10985 }
   10986 
   10987 static int
   10988 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10989 {
   10990 	struct wm_softc *sc = device_private(dev);
   10991 	int rv;
   10992 
   10993 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10994 		switch (sc->sc_phytype) {
   10995 		case WMPHY_IGP:
   10996 		case WMPHY_IGP_2:
   10997 		case WMPHY_IGP_3:
   10998 			rv = wm_gmii_mdic_writereg(dev, phy,
   10999 			    IGPHY_PAGE_SELECT, reg);
   11000 			if (rv != 0)
   11001 				return rv;
   11002 			break;
   11003 		default:
   11004 #ifdef WM_DEBUG
    11005 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
   11006 			    __func__, sc->sc_phytype, reg);
   11007 #endif
   11008 			break;
   11009 		}
   11010 	}
   11011 
   11012 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11013 }
   11014 
   11015 /*
   11016  * wm_gmii_i80003_readreg:	[mii interface function]
   11017  *
    11018  *	Read a PHY register on the Kumeran bus.
   11019  * This could be handled by the PHY layer if we didn't have to lock the
   11020  * resource ...
   11021  */
   11022 static int
   11023 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11024 {
   11025 	struct wm_softc *sc = device_private(dev);
   11026 	int page_select;
   11027 	uint16_t temp, temp2;
   11028 	int rv = 0;
   11029 
   11030 	if (phy != 1) /* Only one PHY on kumeran bus */
   11031 		return -1;
   11032 
   11033 	if (sc->phy.acquire(sc)) {
   11034 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11035 		return -1;
   11036 	}
   11037 
   11038 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11039 		page_select = GG82563_PHY_PAGE_SELECT;
   11040 	else {
   11041 		/*
   11042 		 * Use Alternative Page Select register to access registers
   11043 		 * 30 and 31.
   11044 		 */
   11045 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11046 	}
   11047 	temp = reg >> GG82563_PAGE_SHIFT;
   11048 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11049 		goto out;
   11050 
   11051 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11052 		/*
    11053 		 * Wait another 200us to work around a bug in the ready bit
    11054 		 * of the MDIC register.
   11055 		 */
   11056 		delay(200);
   11057 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11058 		if ((rv != 0) || (temp2 != temp)) {
   11059 			device_printf(dev, "%s failed\n", __func__);
   11060 			rv = -1;
   11061 			goto out;
   11062 		}
   11063 		delay(200);
   11064 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11065 		delay(200);
   11066 	} else
   11067 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11068 
   11069 out:
   11070 	sc->phy.release(sc);
   11071 	return rv;
   11072 }
   11073 
   11074 /*
   11075  * wm_gmii_i80003_writereg:	[mii interface function]
   11076  *
    11077  *	Write a PHY register on the Kumeran bus.
   11078  * This could be handled by the PHY layer if we didn't have to lock the
   11079  * resource ...
   11080  */
   11081 static int
   11082 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11083 {
   11084 	struct wm_softc *sc = device_private(dev);
   11085 	int page_select, rv;
   11086 	uint16_t temp, temp2;
   11087 
   11088 	if (phy != 1) /* Only one PHY on kumeran bus */
   11089 		return -1;
   11090 
   11091 	if (sc->phy.acquire(sc)) {
   11092 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11093 		return -1;
   11094 	}
   11095 
   11096 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11097 		page_select = GG82563_PHY_PAGE_SELECT;
   11098 	else {
   11099 		/*
   11100 		 * Use Alternative Page Select register to access registers
   11101 		 * 30 and 31.
   11102 		 */
   11103 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11104 	}
   11105 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11106 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11107 		goto out;
   11108 
   11109 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11110 		/*
    11111 		 * Wait another 200us to work around a bug in the ready bit
    11112 		 * of the MDIC register.
   11113 		 */
   11114 		delay(200);
   11115 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11116 		if ((rv != 0) || (temp2 != temp)) {
   11117 			device_printf(dev, "%s failed\n", __func__);
   11118 			rv = -1;
   11119 			goto out;
   11120 		}
   11121 		delay(200);
   11122 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11123 		delay(200);
   11124 	} else
   11125 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11126 
   11127 out:
   11128 	sc->phy.release(sc);
   11129 	return rv;
   11130 }
   11131 
   11132 /*
   11133  * wm_gmii_bm_readreg:	[mii interface function]
   11134  *
 *	Read a PHY register on the BM PHY.
   11136  * This could be handled by the PHY layer if we didn't have to lock the
   11137  * resource ...
   11138  */
   11139 static int
   11140 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11141 {
   11142 	struct wm_softc *sc = device_private(dev);
   11143 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11144 	int rv;
   11145 
   11146 	if (sc->phy.acquire(sc)) {
   11147 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11148 		return -1;
   11149 	}
   11150 
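	/*
	 * On parts other than the 82574 and 82583, registers on pages >=
	 * 768, page 0 register 25 and register 31 only respond at PHY
	 * address 1, so redirect those accesses there.
	 */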
   11151 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11152 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11153 		    || (reg == 31)) ? 1 : phy;
   11154 	/* Page 800 works differently than the rest so it has its own func */
   11155 	if (page == BM_WUC_PAGE) {
   11156 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11157 		goto release;
   11158 	}
   11159 
   11160 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11161 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11162 		    && (sc->sc_type != WM_T_82583))
   11163 			rv = wm_gmii_mdic_writereg(dev, phy,
   11164 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11165 		else
   11166 			rv = wm_gmii_mdic_writereg(dev, phy,
   11167 			    BME1000_PHY_PAGE_SELECT, page);
   11168 		if (rv != 0)
   11169 			goto release;
   11170 	}
   11171 
   11172 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11173 
   11174 release:
   11175 	sc->phy.release(sc);
   11176 	return rv;
   11177 }
   11178 
   11179 /*
   11180  * wm_gmii_bm_writereg:	[mii interface function]
   11181  *
 *	Write a PHY register on the BM PHY.
   11183  * This could be handled by the PHY layer if we didn't have to lock the
   11184  * resource ...
   11185  */
   11186 static int
   11187 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11188 {
   11189 	struct wm_softc *sc = device_private(dev);
   11190 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11191 	int rv;
   11192 
   11193 	if (sc->phy.acquire(sc)) {
   11194 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11195 		return -1;
   11196 	}
   11197 
   11198 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11199 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11200 		    || (reg == 31)) ? 1 : phy;
   11201 	/* Page 800 works differently than the rest so it has its own func */
   11202 	if (page == BM_WUC_PAGE) {
   11203 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11204 		goto release;
   11205 	}
   11206 
   11207 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11208 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11209 		    && (sc->sc_type != WM_T_82583))
   11210 			rv = wm_gmii_mdic_writereg(dev, phy,
   11211 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11212 		else
   11213 			rv = wm_gmii_mdic_writereg(dev, phy,
   11214 			    BME1000_PHY_PAGE_SELECT, page);
   11215 		if (rv != 0)
   11216 			goto release;
   11217 	}
   11218 
   11219 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11220 
   11221 release:
   11222 	sc->phy.release(sc);
   11223 	return rv;
   11224 }
   11225 
   11226 /*
   11227  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11228  *  @dev: pointer to the HW structure
   11229  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11230  *
   11231  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11232  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11233  */
   11234 static int
   11235 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11236 {
   11237 #ifdef WM_DEBUG
   11238 	struct wm_softc *sc = device_private(dev);
   11239 #endif
   11240 	uint16_t temp;
   11241 	int rv;
   11242 
   11243 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11244 		device_xname(dev), __func__));
   11245 
   11246 	if (!phy_regp)
   11247 		return -1;
   11248 
   11249 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11250 
   11251 	/* Select Port Control Registers page */
   11252 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11253 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11254 	if (rv != 0)
   11255 		return rv;
   11256 
   11257 	/* Read WUCE and save it */
   11258 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11259 	if (rv != 0)
   11260 		return rv;
   11261 
   11262 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11263 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11264 	 */
   11265 	temp = *phy_regp;
   11266 	temp |= BM_WUC_ENABLE_BIT;
   11267 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11268 
   11269 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11270 		return rv;
   11271 
   11272 	/* Select Host Wakeup Registers page - caller now able to write
   11273 	 * registers on the Wakeup registers page
   11274 	 */
   11275 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11276 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11277 }
   11278 
   11279 /*
   11280  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11281  *  @dev: pointer to the HW structure
   11282  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11283  *
   11284  *  Restore BM_WUC_ENABLE_REG to its original value.
   11285  *
   11286  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11287  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11288  *  caller.
   11289  */
   11290 static int
   11291 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11292 {
   11293 #ifdef WM_DEBUG
   11294 	struct wm_softc *sc = device_private(dev);
   11295 #endif
   11296 
   11297 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11298 		device_xname(dev), __func__));
   11299 
   11300 	if (!phy_regp)
   11301 		return -1;
   11302 
   11303 	/* Select Port Control Registers page */
   11304 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11305 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11306 
   11307 	/* Restore 769.17 to its original value */
   11308 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11309 
   11310 	return 0;
   11311 }
   11312 
   11313 /*
   11314  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11315  *  @sc: pointer to the HW structure
   11316  *  @offset: register offset to be read or written
   11317  *  @val: pointer to the data to read or write
   11318  *  @rd: determines if operation is read or write
   11319  *  @page_set: BM_WUC_PAGE already set and access enabled
   11320  *
   11321  *  Read the PHY register at offset and store the retrieved information in
   11322  *  data, or write data to PHY register at offset.  Note the procedure to
 *  access the PHY wakeup registers differs from that for the other PHY
 *  registers. It works as follows:
 *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for the manageability engine)
   11327  *  3) Write the address using the address opcode (0x11)
   11328  *  4) Read or write the data using the data opcode (0x12)
   11329  *  5) Restore 769.17.2 to its original value
   11330  *
   11331  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11332  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11333  *
   11334  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11335  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to wm_{enable,disable}_phy_wakeup_reg_access_bm()).
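 *
 *  For example, wm_gmii_bm_readreg() reads a wakeup register this way:
 *	rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
 *  where the page bits of reg encode BM_WUC_PAGE.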
   11337  */
   11338 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11340 	bool page_set)
   11341 {
   11342 	struct wm_softc *sc = device_private(dev);
   11343 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11344 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11345 	uint16_t wuce;
   11346 	int rv = 0;
   11347 
   11348 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11349 		device_xname(dev), __func__));
   11350 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11351 	if ((sc->sc_type == WM_T_PCH)
   11352 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11353 		device_printf(dev,
   11354 		    "Attempting to access page %d while gig enabled.\n", page);
   11355 	}
   11356 
   11357 	if (!page_set) {
   11358 		/* Enable access to PHY wakeup registers */
   11359 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11360 		if (rv != 0) {
   11361 			device_printf(dev,
   11362 			    "%s: Could not enable PHY wakeup reg access\n",
   11363 			    __func__);
   11364 			return rv;
   11365 		}
   11366 	}
   11367 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11368 		device_xname(sc->sc_dev), __func__, page, regnum));
   11369 
	/*
	 * Access the PHY wakeup register: steps 3 and 4 of the procedure
	 * described in the comment above this function.
	 */
   11374 
   11375 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11376 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11377 	if (rv != 0)
   11378 		return rv;
   11379 
   11380 	if (rd) {
   11381 		/* Read the Wakeup register page value using opcode 0x12 */
   11382 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11383 	} else {
   11384 		/* Write the Wakeup register page value using opcode 0x12 */
   11385 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11386 	}
   11387 	if (rv != 0)
   11388 		return rv;
   11389 
   11390 	if (!page_set)
   11391 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11392 
   11393 	return rv;
   11394 }
   11395 
   11396 /*
   11397  * wm_gmii_hv_readreg:	[mii interface function]
   11398  *
 *	Read a PHY register on the HV (PCH family) PHY.
   11400  * This could be handled by the PHY layer if we didn't have to lock the
   11401  * resource ...
   11402  */
   11403 static int
   11404 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11405 {
   11406 	struct wm_softc *sc = device_private(dev);
   11407 	int rv;
   11408 
   11409 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11410 		device_xname(dev), __func__));
   11411 	if (sc->phy.acquire(sc)) {
   11412 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11413 		return -1;
   11414 	}
   11415 
   11416 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11417 	sc->phy.release(sc);
   11418 	return rv;
   11419 }
   11420 
   11421 static int
   11422 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11423 {
   11424 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11425 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11426 	int rv;
   11427 
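	/* Registers on pages >= 768 only respond at PHY address 1. */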
   11428 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11429 
   11430 	/* Page 800 works differently than the rest so it has its own func */
   11431 	if (page == BM_WUC_PAGE)
   11432 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11433 
	/*
	 * Pages from 1 to 767 work differently from the rest and would
	 * need their own access method; that is not implemented, so fail
	 * explicitly.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11442 
   11443 	/*
   11444 	 * XXX I21[789] documents say that the SMBus Address register is at
   11445 	 * PHY address 01, Page 0 (not 768), Register 26.
   11446 	 */
   11447 	if (page == HV_INTC_FC_PAGE_START)
   11448 		page = 0;
   11449 
   11450 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11451 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11452 		    page << BME1000_PAGE_SHIFT);
   11453 		if (rv != 0)
   11454 			return rv;
   11455 	}
   11456 
   11457 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11458 }
   11459 
   11460 /*
   11461  * wm_gmii_hv_writereg:	[mii interface function]
   11462  *
 *	Write a PHY register on the HV (PCH family) PHY.
   11464  * This could be handled by the PHY layer if we didn't have to lock the
   11465  * resource ...
   11466  */
   11467 static int
   11468 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11469 {
   11470 	struct wm_softc *sc = device_private(dev);
   11471 	int rv;
   11472 
   11473 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11474 		device_xname(dev), __func__));
   11475 
   11476 	if (sc->phy.acquire(sc)) {
   11477 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11478 		return -1;
   11479 	}
   11480 
   11481 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11482 	sc->phy.release(sc);
   11483 
   11484 	return rv;
   11485 }
   11486 
   11487 static int
   11488 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11489 {
   11490 	struct wm_softc *sc = device_private(dev);
   11491 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11492 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11493 	int rv;
   11494 
   11495 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11496 
   11497 	/* Page 800 works differently than the rest so it has its own func */
   11498 	if (page == BM_WUC_PAGE)
   11499 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11500 		    false);
   11501 
	/*
	 * Pages from 1 to 767 work differently from the rest and would
	 * need their own access method; that is not implemented, so fail
	 * explicitly.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11510 
   11511 	{
   11512 		/*
   11513 		 * XXX I21[789] documents say that the SMBus Address register
   11514 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11515 		 */
   11516 		if (page == HV_INTC_FC_PAGE_START)
   11517 			page = 0;
   11518 
   11519 		/*
   11520 		 * XXX Workaround MDIO accesses being disabled after entering
   11521 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11522 		 * register is set)
   11523 		 */
   11524 		if (sc->sc_phytype == WMPHY_82578) {
   11525 			struct mii_softc *child;
   11526 
   11527 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11528 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11529 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11530 			    && ((val & (1 << 11)) != 0)) {
   11531 				device_printf(dev, "XXX need workaround\n");
   11532 			}
   11533 		}
   11534 
   11535 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11536 			rv = wm_gmii_mdic_writereg(dev, 1,
   11537 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11538 			if (rv != 0)
   11539 				return rv;
   11540 		}
   11541 	}
   11542 
   11543 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11544 }
   11545 
   11546 /*
   11547  * wm_gmii_82580_readreg:	[mii interface function]
   11548  *
   11549  *	Read a PHY register on the 82580 and I350.
   11550  * This could be handled by the PHY layer if we didn't have to lock the
   11551  * resource ...
   11552  */
   11553 static int
   11554 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11555 {
   11556 	struct wm_softc *sc = device_private(dev);
   11557 	int rv;
   11558 
   11559 	if (sc->phy.acquire(sc) != 0) {
   11560 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11561 		return -1;
   11562 	}
   11563 
   11564 #ifdef DIAGNOSTIC
   11565 	if (reg > MII_ADDRMASK) {
   11566 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11567 		    __func__, sc->sc_phytype, reg);
   11568 		reg &= MII_ADDRMASK;
   11569 	}
   11570 #endif
   11571 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11572 
   11573 	sc->phy.release(sc);
   11574 	return rv;
   11575 }
   11576 
   11577 /*
   11578  * wm_gmii_82580_writereg:	[mii interface function]
   11579  *
   11580  *	Write a PHY register on the 82580 and I350.
   11581  * This could be handled by the PHY layer if we didn't have to lock the
   11582  * resource ...
   11583  */
   11584 static int
   11585 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11586 {
   11587 	struct wm_softc *sc = device_private(dev);
   11588 	int rv;
   11589 
   11590 	if (sc->phy.acquire(sc) != 0) {
   11591 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11592 		return -1;
   11593 	}
   11594 
   11595 #ifdef DIAGNOSTIC
   11596 	if (reg > MII_ADDRMASK) {
   11597 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11598 		    __func__, sc->sc_phytype, reg);
   11599 		reg &= MII_ADDRMASK;
   11600 	}
   11601 #endif
   11602 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11603 
   11604 	sc->phy.release(sc);
   11605 	return rv;
   11606 }
   11607 
   11608 /*
   11609  * wm_gmii_gs40g_readreg:	[mii interface function]
   11610  *
 *	Read a PHY register on the I210 and I211.
   11612  * This could be handled by the PHY layer if we didn't have to lock the
   11613  * resource ...
   11614  */
   11615 static int
   11616 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11617 {
   11618 	struct wm_softc *sc = device_private(dev);
   11619 	int page, offset;
   11620 	int rv;
   11621 
   11622 	/* Acquire semaphore */
   11623 	if (sc->phy.acquire(sc)) {
   11624 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11625 		return -1;
   11626 	}
   11627 
   11628 	/* Page select */
   11629 	page = reg >> GS40G_PAGE_SHIFT;
   11630 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11631 	if (rv != 0)
   11632 		goto release;
   11633 
   11634 	/* Read reg */
   11635 	offset = reg & GS40G_OFFSET_MASK;
   11636 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11637 
   11638 release:
   11639 	sc->phy.release(sc);
   11640 	return rv;
   11641 }
   11642 
   11643 /*
   11644  * wm_gmii_gs40g_writereg:	[mii interface function]
   11645  *
   11646  *	Write a PHY register on the I210 and I211.
   11647  * This could be handled by the PHY layer if we didn't have to lock the
   11648  * resource ...
   11649  */
   11650 static int
   11651 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11652 {
   11653 	struct wm_softc *sc = device_private(dev);
   11654 	uint16_t page;
   11655 	int offset, rv;
   11656 
   11657 	/* Acquire semaphore */
   11658 	if (sc->phy.acquire(sc)) {
   11659 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11660 		return -1;
   11661 	}
   11662 
   11663 	/* Page select */
   11664 	page = reg >> GS40G_PAGE_SHIFT;
   11665 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11666 	if (rv != 0)
   11667 		goto release;
   11668 
   11669 	/* Write reg */
   11670 	offset = reg & GS40G_OFFSET_MASK;
   11671 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11672 
   11673 release:
   11674 	/* Release semaphore */
   11675 	sc->phy.release(sc);
   11676 	return rv;
   11677 }
   11678 
   11679 /*
   11680  * wm_gmii_statchg:	[mii interface function]
   11681  *
   11682  *	Callback from MII layer when media changes.
   11683  */
   11684 static void
   11685 wm_gmii_statchg(struct ifnet *ifp)
   11686 {
   11687 	struct wm_softc *sc = ifp->if_softc;
   11688 	struct mii_data *mii = &sc->sc_mii;
   11689 
   11690 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11691 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11692 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11693 
   11694 	/* Get flow control negotiation result. */
   11695 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11696 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11697 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11698 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11699 	}
   11700 
   11701 	if (sc->sc_flowflags & IFM_FLOW) {
   11702 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11703 			sc->sc_ctrl |= CTRL_TFCE;
   11704 			sc->sc_fcrtl |= FCRTL_XONE;
   11705 		}
   11706 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11707 			sc->sc_ctrl |= CTRL_RFCE;
   11708 	}
   11709 
   11710 	if (mii->mii_media_active & IFM_FDX) {
   11711 		DPRINTF(sc, WM_DEBUG_LINK,
   11712 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11713 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11714 	} else {
   11715 		DPRINTF(sc, WM_DEBUG_LINK,
   11716 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11717 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11718 	}
   11719 
   11720 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11721 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11722 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11723 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11724 	if (sc->sc_type == WM_T_80003) {
   11725 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11726 		case IFM_1000_T:
   11727 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11728 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11729 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11730 			break;
   11731 		default:
   11732 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11733 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11734 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11735 			break;
   11736 		}
   11737 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11738 	}
   11739 }
   11740 
   11741 /* kumeran related (80003, ICH* and PCH*) */
   11742 
   11743 /*
   11744  * wm_kmrn_readreg:
   11745  *
   11746  *	Read a kumeran register
   11747  */
   11748 static int
   11749 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11750 {
   11751 	int rv;
   11752 
   11753 	if (sc->sc_type == WM_T_80003)
   11754 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11755 	else
   11756 		rv = sc->phy.acquire(sc);
   11757 	if (rv != 0) {
   11758 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11759 		    __func__);
   11760 		return rv;
   11761 	}
   11762 
   11763 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11764 
   11765 	if (sc->sc_type == WM_T_80003)
   11766 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11767 	else
   11768 		sc->phy.release(sc);
   11769 
   11770 	return rv;
   11771 }
   11772 
   11773 static int
   11774 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11775 {
   11776 
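	/*
	 * Kumeran registers are accessed indirectly through KUMCTRLSTA:
	 * write the register offset with the read-enable (REN) bit set,
	 * wait for the access to complete, then read the data back from
	 * the same register.
	 */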
   11777 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11778 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11779 	    KUMCTRLSTA_REN);
   11780 	CSR_WRITE_FLUSH(sc);
   11781 	delay(2);
   11782 
   11783 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11784 
   11785 	return 0;
   11786 }
   11787 
   11788 /*
   11789  * wm_kmrn_writereg:
   11790  *
   11791  *	Write a kumeran register
   11792  */
   11793 static int
   11794 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11795 {
   11796 	int rv;
   11797 
   11798 	if (sc->sc_type == WM_T_80003)
   11799 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11800 	else
   11801 		rv = sc->phy.acquire(sc);
   11802 	if (rv != 0) {
   11803 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11804 		    __func__);
   11805 		return rv;
   11806 	}
   11807 
   11808 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11809 
   11810 	if (sc->sc_type == WM_T_80003)
   11811 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11812 	else
   11813 		sc->phy.release(sc);
   11814 
   11815 	return rv;
   11816 }
   11817 
   11818 static int
   11819 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11820 {
   11821 
   11822 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11823 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11824 
   11825 	return 0;
   11826 }
   11827 
   11828 /*
   11829  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11830  * This access method is different from IEEE MMD.
   11831  */
   11832 static int
   11833 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11834 {
   11835 	struct wm_softc *sc = device_private(dev);
   11836 	int rv;
   11837 
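	/*
	 * EMI registers are an indirect address/data pair: write the EMI
	 * register number to the address register, then move the data
	 * through the data register.
	 */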
   11838 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11839 	if (rv != 0)
   11840 		return rv;
   11841 
   11842 	if (rd)
   11843 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11844 	else
   11845 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11846 	return rv;
   11847 }
   11848 
   11849 static int
   11850 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11851 {
   11852 
   11853 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11854 }
   11855 
   11856 static int
   11857 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11858 {
   11859 
   11860 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11861 }
   11862 
   11863 /* SGMII related */
   11864 
   11865 /*
   11866  * wm_sgmii_uses_mdio
   11867  *
   11868  * Check whether the transaction is to the internal PHY or the external
   11869  * MDIO interface. Return true if it's MDIO.
   11870  */
   11871 static bool
   11872 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11873 {
   11874 	uint32_t reg;
   11875 	bool ismdio = false;
   11876 
   11877 	switch (sc->sc_type) {
   11878 	case WM_T_82575:
   11879 	case WM_T_82576:
   11880 		reg = CSR_READ(sc, WMREG_MDIC);
   11881 		ismdio = ((reg & MDIC_DEST) != 0);
   11882 		break;
   11883 	case WM_T_82580:
   11884 	case WM_T_I350:
   11885 	case WM_T_I354:
   11886 	case WM_T_I210:
   11887 	case WM_T_I211:
   11888 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11889 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11890 		break;
   11891 	default:
   11892 		break;
   11893 	}
   11894 
   11895 	return ismdio;
   11896 }
   11897 
   11898 /* Setup internal SGMII PHY for SFP */
   11899 static void
   11900 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   11901 {
   11902 	uint16_t id1, id2, phyreg;
   11903 	int i, rv;
   11904 
   11905 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   11906 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   11907 		return;
   11908 
   11909 	for (i = 0; i < MII_NPHY; i++) {
   11910 		sc->phy.no_errprint = true;
   11911 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   11912 		if (rv != 0)
   11913 			continue;
   11914 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   11915 		if (rv != 0)
   11916 			continue;
   11917 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   11918 			continue;
   11919 		sc->phy.no_errprint = false;
   11920 
   11921 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   11922 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   11923 		phyreg |= ESSR_SGMII_WOC_COPPER;
   11924 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   11925 		break;
   11926 	}
}
   11929 
   11930 /*
   11931  * wm_sgmii_readreg:	[mii interface function]
   11932  *
   11933  *	Read a PHY register on the SGMII
   11934  * This could be handled by the PHY layer if we didn't have to lock the
   11935  * resource ...
   11936  */
   11937 static int
   11938 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11939 {
   11940 	struct wm_softc *sc = device_private(dev);
   11941 	int rv;
   11942 
   11943 	if (sc->phy.acquire(sc)) {
   11944 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11945 		return -1;
   11946 	}
   11947 
   11948 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11949 
   11950 	sc->phy.release(sc);
   11951 	return rv;
   11952 }
   11953 
   11954 static int
   11955 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11956 {
   11957 	struct wm_softc *sc = device_private(dev);
   11958 	uint32_t i2ccmd;
   11959 	int i, rv = 0;
   11960 
   11961 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11962 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11963 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11964 
   11965 	/* Poll the ready bit */
   11966 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11967 		delay(50);
   11968 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11969 		if (i2ccmd & I2CCMD_READY)
   11970 			break;
   11971 	}
   11972 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11973 		device_printf(dev, "I2CCMD Read did not complete\n");
   11974 		rv = ETIMEDOUT;
   11975 	}
   11976 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11977 		if (!sc->phy.no_errprint)
   11978 			device_printf(dev, "I2CCMD Error bit set\n");
   11979 		rv = EIO;
   11980 	}
   11981 
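	/* The I2C interface returns the data byte-swapped; restore order. */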
   11982 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11983 
   11984 	return rv;
   11985 }
   11986 
   11987 /*
   11988  * wm_sgmii_writereg:	[mii interface function]
   11989  *
   11990  *	Write a PHY register on the SGMII.
   11991  * This could be handled by the PHY layer if we didn't have to lock the
   11992  * resource ...
   11993  */
   11994 static int
   11995 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11996 {
   11997 	struct wm_softc *sc = device_private(dev);
   11998 	int rv;
   11999 
   12000 	if (sc->phy.acquire(sc) != 0) {
   12001 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12002 		return -1;
   12003 	}
   12004 
   12005 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12006 
   12007 	sc->phy.release(sc);
   12008 
   12009 	return rv;
   12010 }
   12011 
   12012 static int
   12013 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12014 {
   12015 	struct wm_softc *sc = device_private(dev);
   12016 	uint32_t i2ccmd;
   12017 	uint16_t swapdata;
   12018 	int rv = 0;
   12019 	int i;
   12020 
   12021 	/* Swap the data bytes for the I2C interface */
   12022 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12023 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12024 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12025 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12026 
   12027 	/* Poll the ready bit */
   12028 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12029 		delay(50);
   12030 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12031 		if (i2ccmd & I2CCMD_READY)
   12032 			break;
   12033 	}
   12034 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12035 		device_printf(dev, "I2CCMD Write did not complete\n");
   12036 		rv = ETIMEDOUT;
   12037 	}
   12038 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12039 		device_printf(dev, "I2CCMD Error bit set\n");
   12040 		rv = EIO;
   12041 	}
   12042 
   12043 	return rv;
   12044 }
   12045 
   12046 /* TBI related */
   12047 
   12048 static bool
   12049 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12050 {
   12051 	bool sig;
   12052 
   12053 	sig = ctrl & CTRL_SWDPIN(1);
   12054 
   12055 	/*
   12056 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12057 	 * detect a signal, 1 if they don't.
   12058 	 */
   12059 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12060 		sig = !sig;
   12061 
   12062 	return sig;
   12063 }
   12064 
   12065 /*
   12066  * wm_tbi_mediainit:
   12067  *
   12068  *	Initialize media for use on 1000BASE-X devices.
   12069  */
   12070 static void
   12071 wm_tbi_mediainit(struct wm_softc *sc)
   12072 {
   12073 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12074 	const char *sep = "";
   12075 
   12076 	if (sc->sc_type < WM_T_82543)
   12077 		sc->sc_tipg = TIPG_WM_DFLT;
   12078 	else
   12079 		sc->sc_tipg = TIPG_LG_DFLT;
   12080 
   12081 	sc->sc_tbi_serdes_anegticks = 5;
   12082 
   12083 	/* Initialize our media structures */
   12084 	sc->sc_mii.mii_ifp = ifp;
   12085 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12086 
   12087 	ifp->if_baudrate = IF_Gbps(1);
   12088 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12089 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12090 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12091 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12092 		    sc->sc_core_lock);
   12093 	} else {
   12094 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12095 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12096 	}
   12097 
   12098 	/*
   12099 	 * SWD Pins:
   12100 	 *
   12101 	 *	0 = Link LED (output)
   12102 	 *	1 = Loss Of Signal (input)
   12103 	 */
   12104 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12105 
   12106 	/* XXX Perhaps this is only for TBI */
   12107 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12108 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12109 
   12110 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12111 		sc->sc_ctrl &= ~CTRL_LRST;
   12112 
   12113 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12114 
   12115 #define	ADD(ss, mm, dd)							\
   12116 do {									\
   12117 	aprint_normal("%s%s", sep, ss);					\
   12118 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12119 	sep = ", ";							\
   12120 } while (/*CONSTCOND*/0)
   12121 
   12122 	aprint_normal_dev(sc->sc_dev, "");
   12123 
   12124 	if (sc->sc_type == WM_T_I354) {
   12125 		uint32_t status;
   12126 
   12127 		status = CSR_READ(sc, WMREG_STATUS);
   12128 		if (((status & STATUS_2P5_SKU) != 0)
   12129 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   12133 	} else if (sc->sc_type == WM_T_82545) {
   12134 		/* Only 82545 is LX (XXX except SFP) */
   12135 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12136 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12137 	} else if (sc->sc_sfptype != 0) {
   12138 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12139 		switch (sc->sc_sfptype) {
   12140 		default:
   12141 		case SFF_SFP_ETH_FLAGS_1000SX:
   12142 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12143 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12144 			break;
   12145 		case SFF_SFP_ETH_FLAGS_1000LX:
   12146 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12147 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12148 			break;
   12149 		case SFF_SFP_ETH_FLAGS_1000CX:
   12150 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12151 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12152 			break;
   12153 		case SFF_SFP_ETH_FLAGS_1000T:
   12154 			ADD("1000baseT", IFM_1000_T, 0);
   12155 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12156 			break;
   12157 		case SFF_SFP_ETH_FLAGS_100FX:
   12158 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12159 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12160 			break;
   12161 		}
   12162 	} else {
   12163 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12164 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12165 	}
   12166 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12167 	aprint_normal("\n");
   12168 
   12169 #undef ADD
   12170 
   12171 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12172 }
   12173 
   12174 /*
   12175  * wm_tbi_mediachange:	[ifmedia interface function]
   12176  *
   12177  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12178  */
   12179 static int
   12180 wm_tbi_mediachange(struct ifnet *ifp)
   12181 {
   12182 	struct wm_softc *sc = ifp->if_softc;
   12183 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12184 	uint32_t status, ctrl;
   12185 	bool signal;
   12186 	int i;
   12187 
   12188 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12189 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12190 		/* XXX need some work for >= 82571 and < 82575 */
   12191 		if (sc->sc_type < WM_T_82575)
   12192 			return 0;
   12193 	}
   12194 
   12195 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12196 	    || (sc->sc_type >= WM_T_82575))
   12197 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12198 
   12199 	sc->sc_ctrl &= ~CTRL_LRST;
   12200 	sc->sc_txcw = TXCW_ANE;
   12201 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12202 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12203 	else if (ife->ifm_media & IFM_FDX)
   12204 		sc->sc_txcw |= TXCW_FD;
   12205 	else
   12206 		sc->sc_txcw |= TXCW_HD;
   12207 
   12208 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12209 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12210 
   12211 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12212 		device_xname(sc->sc_dev), sc->sc_txcw));
   12213 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12214 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12215 	CSR_WRITE_FLUSH(sc);
   12216 	delay(1000);
   12217 
   12218 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12219 	signal = wm_tbi_havesignal(sc, ctrl);
   12220 
   12221 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12222 		signal));
   12223 
   12224 	if (signal) {
   12225 		/* Have signal; wait for the link to come up. */
   12226 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12227 			delay(10000);
   12228 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12229 				break;
   12230 		}
   12231 
   12232 		DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12233 			device_xname(sc->sc_dev), i));
   12234 
   12235 		status = CSR_READ(sc, WMREG_STATUS);
   12236 		DPRINTF(sc, WM_DEBUG_LINK,
   12237 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12238 			device_xname(sc->sc_dev), status, STATUS_LU));
   12239 		if (status & STATUS_LU) {
   12240 			/* Link is up. */
   12241 			DPRINTF(sc, WM_DEBUG_LINK,
   12242 			    ("%s: LINK: set media -> link up %s\n",
   12243 				device_xname(sc->sc_dev),
   12244 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12245 
   12246 			/*
   12247 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12248 			 * so we should update sc->sc_ctrl
   12249 			 */
   12250 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12251 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12252 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12253 			if (status & STATUS_FD)
   12254 				sc->sc_tctl |=
   12255 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12256 			else
   12257 				sc->sc_tctl |=
   12258 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12259 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12260 				sc->sc_fcrtl |= FCRTL_XONE;
   12261 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12262 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12263 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12264 			sc->sc_tbi_linkup = 1;
   12265 		} else {
   12266 			if (i == WM_LINKUP_TIMEOUT)
   12267 				wm_check_for_link(sc);
   12268 			/* Link is down. */
   12269 			DPRINTF(sc, WM_DEBUG_LINK,
   12270 			    ("%s: LINK: set media -> link down\n",
   12271 				device_xname(sc->sc_dev)));
   12272 			sc->sc_tbi_linkup = 0;
   12273 		}
   12274 	} else {
   12275 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12276 			device_xname(sc->sc_dev)));
   12277 		sc->sc_tbi_linkup = 0;
   12278 	}
   12279 
   12280 	wm_tbi_serdes_set_linkled(sc);
   12281 
   12282 	return 0;
   12283 }
   12284 
   12285 /*
   12286  * wm_tbi_mediastatus:	[ifmedia interface function]
   12287  *
   12288  *	Get the current interface media status on a 1000BASE-X device.
   12289  */
   12290 static void
   12291 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12292 {
   12293 	struct wm_softc *sc = ifp->if_softc;
   12294 	uint32_t ctrl, status;
   12295 
   12296 	ifmr->ifm_status = IFM_AVALID;
   12297 	ifmr->ifm_active = IFM_ETHER;
   12298 
   12299 	status = CSR_READ(sc, WMREG_STATUS);
   12300 	if ((status & STATUS_LU) == 0) {
   12301 		ifmr->ifm_active |= IFM_NONE;
   12302 		return;
   12303 	}
   12304 
   12305 	ifmr->ifm_status |= IFM_ACTIVE;
   12306 	/* Only 82545 is LX */
   12307 	if (sc->sc_type == WM_T_82545)
   12308 		ifmr->ifm_active |= IFM_1000_LX;
   12309 	else
   12310 		ifmr->ifm_active |= IFM_1000_SX;
   12311 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12312 		ifmr->ifm_active |= IFM_FDX;
   12313 	else
   12314 		ifmr->ifm_active |= IFM_HDX;
   12315 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12316 	if (ctrl & CTRL_RFCE)
   12317 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12318 	if (ctrl & CTRL_TFCE)
   12319 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12320 }
   12321 
   12322 /* XXX TBI only */
   12323 static int
   12324 wm_check_for_link(struct wm_softc *sc)
   12325 {
   12326 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12327 	uint32_t rxcw;
   12328 	uint32_t ctrl;
   12329 	uint32_t status;
   12330 	bool signal;
   12331 
   12332 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12333 		device_xname(sc->sc_dev), __func__));
   12334 
   12335 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12336 		/* XXX need some work for >= 82571 */
   12337 		if (sc->sc_type >= WM_T_82571) {
   12338 			sc->sc_tbi_linkup = 1;
   12339 			return 0;
   12340 		}
   12341 	}
   12342 
   12343 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12344 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12345 	status = CSR_READ(sc, WMREG_STATUS);
   12346 	signal = wm_tbi_havesignal(sc, ctrl);
   12347 
   12348 	DPRINTF(sc, WM_DEBUG_LINK,
   12349 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12350 		device_xname(sc->sc_dev), __func__, signal,
   12351 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12352 
   12353 	/*
   12354 	 * SWDPIN   LU RXCW
   12355 	 *	0    0	  0
   12356 	 *	0    0	  1	(should not happen)
   12357 	 *	0    1	  0	(should not happen)
   12358 	 *	0    1	  1	(should not happen)
   12359 	 *	1    0	  0	Disable autonego and force linkup
   12360 	 *	1    0	  1	got /C/ but not linkup yet
   12361 	 *	1    1	  0	(linkup)
   12362 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12363 	 *
   12364 	 */
   12365 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12366 		DPRINTF(sc, WM_DEBUG_LINK,
   12367 		    ("%s: %s: force linkup and fullduplex\n",
   12368 			device_xname(sc->sc_dev), __func__));
   12369 		sc->sc_tbi_linkup = 0;
   12370 		/* Disable auto-negotiation in the TXCW register */
   12371 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12372 
   12373 		/*
   12374 		 * Force link-up and also force full-duplex.
   12375 		 *
		 * NOTE: CTRL will update TFCE and RFCE automatically,
   12377 		 * so we should update sc->sc_ctrl
   12378 		 */
   12379 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12380 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12381 	} else if (((status & STATUS_LU) != 0)
   12382 	    && ((rxcw & RXCW_C) != 0)
   12383 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12384 		sc->sc_tbi_linkup = 1;
   12385 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12386 			device_xname(sc->sc_dev),
   12387 			__func__));
   12388 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12389 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12390 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
			device_xname(sc->sc_dev), __func__));
   12393 	} else {
   12394 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12395 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12396 			status));
   12397 	}
   12398 
   12399 	return 0;
   12400 }
   12401 
   12402 /*
   12403  * wm_tbi_tick:
   12404  *
   12405  *	Check the link on TBI devices.
   12406  *	This function acts as mii_tick().
   12407  */
   12408 static void
   12409 wm_tbi_tick(struct wm_softc *sc)
   12410 {
   12411 	struct mii_data *mii = &sc->sc_mii;
   12412 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12413 	uint32_t status;
   12414 
   12415 	KASSERT(WM_CORE_LOCKED(sc));
   12416 
   12417 	status = CSR_READ(sc, WMREG_STATUS);
   12418 
   12419 	/* XXX is this needed? */
   12420 	(void)CSR_READ(sc, WMREG_RXCW);
   12421 	(void)CSR_READ(sc, WMREG_CTRL);
   12422 
   12423 	/* set link status */
   12424 	if ((status & STATUS_LU) == 0) {
   12425 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12426 			device_xname(sc->sc_dev)));
   12427 		sc->sc_tbi_linkup = 0;
   12428 	} else if (sc->sc_tbi_linkup == 0) {
   12429 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12430 			device_xname(sc->sc_dev),
   12431 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12432 		sc->sc_tbi_linkup = 1;
   12433 		sc->sc_tbi_serdes_ticks = 0;
   12434 	}
   12435 
   12436 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12437 		goto setled;
   12438 
   12439 	if ((status & STATUS_LU) == 0) {
   12440 		sc->sc_tbi_linkup = 0;
   12441 		/* If the timer expired, retry autonegotiation */
   12442 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12443 		    && (++sc->sc_tbi_serdes_ticks
   12444 			>= sc->sc_tbi_serdes_anegticks)) {
   12445 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12446 				device_xname(sc->sc_dev), __func__));
   12447 			sc->sc_tbi_serdes_ticks = 0;
   12448 			/*
   12449 			 * Reset the link, and let autonegotiation do
   12450 			 * its thing
   12451 			 */
   12452 			sc->sc_ctrl |= CTRL_LRST;
   12453 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12454 			CSR_WRITE_FLUSH(sc);
   12455 			delay(1000);
   12456 			sc->sc_ctrl &= ~CTRL_LRST;
   12457 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12458 			CSR_WRITE_FLUSH(sc);
   12459 			delay(1000);
   12460 			CSR_WRITE(sc, WMREG_TXCW,
   12461 			    sc->sc_txcw & ~TXCW_ANE);
   12462 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12463 		}
   12464 	}
   12465 
   12466 setled:
   12467 	wm_tbi_serdes_set_linkled(sc);
   12468 }
   12469 
   12470 /* SERDES related */
   12471 static void
   12472 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12473 {
   12474 	uint32_t reg;
   12475 
   12476 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12477 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12478 		return;
   12479 
   12480 	/* Enable PCS to turn on link */
   12481 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12482 	reg |= PCS_CFG_PCS_EN;
   12483 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12484 
   12485 	/* Power up the laser */
   12486 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12487 	reg &= ~CTRL_EXT_SWDPIN(3);
   12488 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12489 
   12490 	/* Flush the write to verify completion */
   12491 	CSR_WRITE_FLUSH(sc);
   12492 	delay(1000);
   12493 }
   12494 
   12495 static int
   12496 wm_serdes_mediachange(struct ifnet *ifp)
   12497 {
   12498 	struct wm_softc *sc = ifp->if_softc;
   12499 	bool pcs_autoneg = true; /* XXX */
   12500 	uint32_t ctrl_ext, pcs_lctl, reg;
   12501 
   12502 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12503 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12504 		return 0;
   12505 
   12506 	/* XXX Currently, this function is not called on 8257[12] */
   12507 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12508 	    || (sc->sc_type >= WM_T_82575))
   12509 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12510 
   12511 	/* Power on the sfp cage if present */
   12512 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12513 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12514 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12515 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12516 
   12517 	sc->sc_ctrl |= CTRL_SLU;
   12518 
   12519 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12520 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12521 
   12522 		reg = CSR_READ(sc, WMREG_CONNSW);
   12523 		reg |= CONNSW_ENRGSRC;
   12524 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12525 	}
   12526 
   12527 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12528 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12529 	case CTRL_EXT_LINK_MODE_SGMII:
   12530 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12531 		pcs_autoneg = true;
   12532 		/* Autoneg time out should be disabled for SGMII mode */
   12533 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12534 		break;
   12535 	case CTRL_EXT_LINK_MODE_1000KX:
   12536 		pcs_autoneg = false;
   12537 		/* FALLTHROUGH */
   12538 	default:
   12539 		if ((sc->sc_type == WM_T_82575)
   12540 		    || (sc->sc_type == WM_T_82576)) {
   12541 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12542 				pcs_autoneg = false;
   12543 		}
   12544 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12545 		    | CTRL_FRCFDX;
   12546 
   12547 		/* Set speed of 1000/Full if speed/duplex is forced */
   12548 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12549 	}
   12550 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12551 
   12552 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12553 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12554 
   12555 	if (pcs_autoneg) {
   12556 		/* Set PCS register for autoneg */
   12557 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12558 
   12559 		/* Disable force flow control for autoneg */
   12560 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12561 
   12562 		/* Configure flow control advertisement for autoneg */
   12563 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12564 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12565 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12566 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12567 	} else
   12568 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12569 
   12570 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12571 
   12572 	return 0;
   12573 }
   12574 
   12575 static void
   12576 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12577 {
   12578 	struct wm_softc *sc = ifp->if_softc;
   12579 	struct mii_data *mii = &sc->sc_mii;
   12580 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12581 	uint32_t pcs_adv, pcs_lpab, reg;
   12582 
   12583 	ifmr->ifm_status = IFM_AVALID;
   12584 	ifmr->ifm_active = IFM_ETHER;
   12585 
   12586 	/* Check PCS */
   12587 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12588 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12589 		ifmr->ifm_active |= IFM_NONE;
   12590 		sc->sc_tbi_linkup = 0;
   12591 		goto setled;
   12592 	}
   12593 
   12594 	sc->sc_tbi_linkup = 1;
   12595 	ifmr->ifm_status |= IFM_ACTIVE;
   12596 	if (sc->sc_type == WM_T_I354) {
   12597 		uint32_t status;
   12598 
   12599 		status = CSR_READ(sc, WMREG_STATUS);
   12600 		if (((status & STATUS_2P5_SKU) != 0)
   12601 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12602 			ifmr->ifm_active |= IFM_2500_KX;
   12603 		} else
   12604 			ifmr->ifm_active |= IFM_1000_KX;
   12605 	} else {
   12606 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12607 		case PCS_LSTS_SPEED_10:
   12608 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12609 			break;
   12610 		case PCS_LSTS_SPEED_100:
   12611 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12612 			break;
   12613 		case PCS_LSTS_SPEED_1000:
   12614 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12615 			break;
   12616 		default:
   12617 			device_printf(sc->sc_dev, "Unknown speed\n");
   12618 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12619 			break;
   12620 		}
   12621 	}
   12622 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12623 	if ((reg & PCS_LSTS_FDX) != 0)
   12624 		ifmr->ifm_active |= IFM_FDX;
   12625 	else
   12626 		ifmr->ifm_active |= IFM_HDX;
   12627 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12628 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12629 		/* Check flow */
   12630 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12631 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12632 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12633 			goto setled;
   12634 		}
   12635 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12636 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12637 		DPRINTF(sc, WM_DEBUG_LINK,
   12638 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
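		/*
		 * Resolve flow control as in IEEE 802.3 Annex 28B: symmetric
		 * pause on both sides enables both directions; otherwise the
		 * asymmetric-pause combinations enable TX-only or RX-only.
		 */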
   12639 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12640 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12641 			mii->mii_media_active |= IFM_FLOW
   12642 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12643 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12644 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12645 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12646 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12647 			mii->mii_media_active |= IFM_FLOW
   12648 			    | IFM_ETH_TXPAUSE;
   12649 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12650 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12651 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12652 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12653 			mii->mii_media_active |= IFM_FLOW
   12654 			    | IFM_ETH_RXPAUSE;
   12655 		}
   12656 	}
   12657 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12658 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12659 setled:
   12660 	wm_tbi_serdes_set_linkled(sc);
   12661 }
   12662 
   12663 /*
   12664  * wm_serdes_tick:
   12665  *
   12666  *	Check the link on serdes devices.
   12667  */
   12668 static void
   12669 wm_serdes_tick(struct wm_softc *sc)
   12670 {
   12671 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12672 	struct mii_data *mii = &sc->sc_mii;
   12673 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12674 	uint32_t reg;
   12675 
   12676 	KASSERT(WM_CORE_LOCKED(sc));
   12677 
   12678 	mii->mii_media_status = IFM_AVALID;
   12679 	mii->mii_media_active = IFM_ETHER;
   12680 
   12681 	/* Check PCS */
   12682 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12683 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12684 		mii->mii_media_status |= IFM_ACTIVE;
   12685 		sc->sc_tbi_linkup = 1;
   12686 		sc->sc_tbi_serdes_ticks = 0;
   12687 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12688 		if ((reg & PCS_LSTS_FDX) != 0)
   12689 			mii->mii_media_active |= IFM_FDX;
   12690 		else
   12691 			mii->mii_media_active |= IFM_HDX;
   12692 	} else {
   12693 		mii->mii_media_status |= IFM_NONE;
   12694 		sc->sc_tbi_linkup = 0;
   12695 		/* If the timer expired, retry autonegotiation */
   12696 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12697 		    && (++sc->sc_tbi_serdes_ticks
   12698 			>= sc->sc_tbi_serdes_anegticks)) {
   12699 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12700 				device_xname(sc->sc_dev), __func__));
   12701 			sc->sc_tbi_serdes_ticks = 0;
   12702 			/* XXX */
   12703 			wm_serdes_mediachange(ifp);
   12704 		}
   12705 	}
   12706 
   12707 	wm_tbi_serdes_set_linkled(sc);
   12708 }
   12709 
   12710 /* SFP related */
   12711 
   12712 static int
   12713 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12714 {
   12715 	uint32_t i2ccmd;
   12716 	int i;
   12717 
   12718 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12719 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12720 
   12721 	/* Poll the ready bit */
   12722 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12723 		delay(50);
   12724 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12725 		if (i2ccmd & I2CCMD_READY)
   12726 			break;
   12727 	}
   12728 	if ((i2ccmd & I2CCMD_READY) == 0)
   12729 		return -1;
   12730 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12731 		return -1;
   12732 
   12733 	*data = i2ccmd & 0x00ff;
   12734 
   12735 	return 0;
   12736 }
   12737 
   12738 static uint32_t
   12739 wm_sfp_get_media_type(struct wm_softc *sc)
   12740 {
   12741 	uint32_t ctrl_ext;
   12742 	uint8_t val = 0;
   12743 	int timeout = 3;
   12744 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12745 	int rv = -1;
   12746 
   12747 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12748 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12749 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12750 	CSR_WRITE_FLUSH(sc);
   12751 
   12752 	/* Read SFP module data */
   12753 	while (timeout) {
   12754 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12755 		if (rv == 0)
   12756 			break;
   12757 		delay(100*1000); /* XXX too big */
   12758 		timeout--;
   12759 	}
   12760 	if (rv != 0)
   12761 		goto out;
   12762 
   12763 	switch (val) {
   12764 	case SFF_SFP_ID_SFF:
   12765 		aprint_normal_dev(sc->sc_dev,
   12766 		    "Module/Connector soldered to board\n");
   12767 		break;
   12768 	case SFF_SFP_ID_SFP:
   12769 		sc->sc_flags |= WM_F_SFP;
   12770 		break;
   12771 	case SFF_SFP_ID_UNKNOWN:
   12772 		goto out;
   12773 	default:
   12774 		break;
   12775 	}
   12776 
   12777 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12778 	if (rv != 0)
   12779 		goto out;
   12780 
   12781 	sc->sc_sfptype = val;
   12782 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12783 		mediatype = WM_MEDIATYPE_SERDES;
   12784 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12785 		sc->sc_flags |= WM_F_SGMII;
   12786 		mediatype = WM_MEDIATYPE_COPPER;
   12787 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12788 		sc->sc_flags |= WM_F_SGMII;
   12789 		mediatype = WM_MEDIATYPE_SERDES;
   12790 	} else {
   12791 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12792 		    __func__, sc->sc_sfptype);
   12793 		sc->sc_sfptype = 0; /* XXX unknown */
   12794 	}
   12795 
   12796 out:
   12797 	/* Restore I2C interface setting */
   12798 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12799 
   12800 	return mediatype;
   12801 }
   12802 
   12803 /*
   12804  * NVM related.
    12805  * Microwire, SPI (with or without EERD) and Flash.
   12806  */
   12807 
   12808 /* Both spi and uwire */
   12809 
   12810 /*
   12811  * wm_eeprom_sendbits:
   12812  *
   12813  *	Send a series of bits to the EEPROM.
   12814  */
   12815 static void
   12816 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12817 {
   12818 	uint32_t reg;
   12819 	int x;
   12820 
   12821 	reg = CSR_READ(sc, WMREG_EECD);
   12822 
   12823 	for (x = nbits; x > 0; x--) {
   12824 		if (bits & (1U << (x - 1)))
   12825 			reg |= EECD_DI;
   12826 		else
   12827 			reg &= ~EECD_DI;
   12828 		CSR_WRITE(sc, WMREG_EECD, reg);
   12829 		CSR_WRITE_FLUSH(sc);
   12830 		delay(2);
   12831 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12832 		CSR_WRITE_FLUSH(sc);
   12833 		delay(2);
   12834 		CSR_WRITE(sc, WMREG_EECD, reg);
   12835 		CSR_WRITE_FLUSH(sc);
   12836 		delay(2);
   12837 	}
   12838 }
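
          /*
           * Illustrative sketch, not driver code: the loop above clocks the
           * bits out MSB-first on EECD_DI, toggling EECD_SK around each bit.
           * The standalone program below mirrors that ordering with stdio so
           * the bit sequence is easy to see; the 3-bit 0x6 value is only an
           * example (it happens to match the Microwire READ opcode).
           */
          #if 0
          #include <stdio.h>
          #include <stdint.h>
          
          static void
          print_sendbits_order(uint32_t bits, int nbits)
          {
          	/* Same loop shape as wm_eeprom_sendbits(): MSB first. */
          	for (int x = nbits; x > 0; x--)
          		printf("%d", (bits & (1U << (x - 1))) != 0);
          	printf("\n");
          }
          
          int
          main(void)
          {
          	print_sendbits_order(0x6, 3);	/* prints "110" */
          	return 0;
          }
          #endif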
   12839 
   12840 /*
   12841  * wm_eeprom_recvbits:
   12842  *
   12843  *	Receive a series of bits from the EEPROM.
   12844  */
   12845 static void
   12846 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12847 {
   12848 	uint32_t reg, val;
   12849 	int x;
   12850 
   12851 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12852 
   12853 	val = 0;
   12854 	for (x = nbits; x > 0; x--) {
   12855 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12856 		CSR_WRITE_FLUSH(sc);
   12857 		delay(2);
   12858 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12859 			val |= (1U << (x - 1));
   12860 		CSR_WRITE(sc, WMREG_EECD, reg);
   12861 		CSR_WRITE_FLUSH(sc);
   12862 		delay(2);
   12863 	}
   12864 	*valp = val;
   12865 }
   12866 
   12867 /* Microwire */
   12868 
   12869 /*
   12870  * wm_nvm_read_uwire:
   12871  *
    12872  *	Read word(s) from the EEPROM using the MicroWire protocol.
   12873  */
   12874 static int
   12875 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12876 {
   12877 	uint32_t reg, val;
   12878 	int i;
   12879 
   12880 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   12881 		device_xname(sc->sc_dev), __func__));
   12882 
   12883 	if (sc->nvm.acquire(sc) != 0)
   12884 		return -1;
   12885 
   12886 	for (i = 0; i < wordcnt; i++) {
   12887 		/* Clear SK and DI. */
   12888 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12889 		CSR_WRITE(sc, WMREG_EECD, reg);
   12890 
   12891 		/*
   12892 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12893 		 * and Xen.
   12894 		 *
    12895 		 * We use this workaround only for the 82540 because
    12896 		 * qemu's e1000 acts as an 82540.
   12897 		 */
   12898 		if (sc->sc_type == WM_T_82540) {
   12899 			reg |= EECD_SK;
   12900 			CSR_WRITE(sc, WMREG_EECD, reg);
   12901 			reg &= ~EECD_SK;
   12902 			CSR_WRITE(sc, WMREG_EECD, reg);
   12903 			CSR_WRITE_FLUSH(sc);
   12904 			delay(2);
   12905 		}
   12906 		/* XXX: end of workaround */
   12907 
   12908 		/* Set CHIP SELECT. */
   12909 		reg |= EECD_CS;
   12910 		CSR_WRITE(sc, WMREG_EECD, reg);
   12911 		CSR_WRITE_FLUSH(sc);
   12912 		delay(2);
   12913 
   12914 		/* Shift in the READ command. */
   12915 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12916 
   12917 		/* Shift in address. */
   12918 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12919 
   12920 		/* Shift out the data. */
   12921 		wm_eeprom_recvbits(sc, &val, 16);
   12922 		data[i] = val & 0xffff;
   12923 
   12924 		/* Clear CHIP SELECT. */
   12925 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12926 		CSR_WRITE(sc, WMREG_EECD, reg);
   12927 		CSR_WRITE_FLUSH(sc);
   12928 		delay(2);
   12929 	}
   12930 
   12931 	sc->nvm.release(sc);
   12932 	return 0;
   12933 }
   12934 
   12935 /* SPI */
   12936 
   12937 /*
   12938  * Set SPI and FLASH related information from the EECD register.
   12939  * For 82541 and 82547, the word size is taken from EEPROM.
   12940  */
   12941 static int
   12942 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12943 {
   12944 	int size;
   12945 	uint32_t reg;
   12946 	uint16_t data;
   12947 
   12948 	reg = CSR_READ(sc, WMREG_EECD);
   12949 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12950 
   12951 	/* Read the size of NVM from EECD by default */
   12952 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12953 	switch (sc->sc_type) {
   12954 	case WM_T_82541:
   12955 	case WM_T_82541_2:
   12956 	case WM_T_82547:
   12957 	case WM_T_82547_2:
   12958 		/* Set dummy value to access EEPROM */
   12959 		sc->sc_nvm_wordsize = 64;
   12960 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12961 			aprint_error_dev(sc->sc_dev,
   12962 			    "%s: failed to read EEPROM size\n", __func__);
   12963 		}
   12964 		reg = data;
   12965 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12966 		if (size == 0)
   12967 			size = 6; /* 64 word size */
   12968 		else
   12969 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12970 		break;
   12971 	case WM_T_80003:
   12972 	case WM_T_82571:
   12973 	case WM_T_82572:
   12974 	case WM_T_82573: /* SPI case */
   12975 	case WM_T_82574: /* SPI case */
   12976 	case WM_T_82583: /* SPI case */
   12977 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12978 		if (size > 14)
   12979 			size = 14;
   12980 		break;
   12981 	case WM_T_82575:
   12982 	case WM_T_82576:
   12983 	case WM_T_82580:
   12984 	case WM_T_I350:
   12985 	case WM_T_I354:
   12986 	case WM_T_I210:
   12987 	case WM_T_I211:
   12988 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12989 		if (size > 15)
   12990 			size = 15;
   12991 		break;
   12992 	default:
   12993 		aprint_error_dev(sc->sc_dev,
   12994 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12995 		return -1;
   12996 		break;
   12997 	}
   12998 
   12999 	sc->sc_nvm_wordsize = 1 << size;
   13000 
   13001 	return 0;
   13002 }
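
          /*
           * Worked example, a sketch rather than driver code: assuming
           * NVM_WORD_SIZE_BASE_SHIFT is 6 (the usual e1000 value), an
           * EECD_EE_SIZE_EX field of 2 decodes via the "size +=" branches
           * above to 1 << (2 + 6) = 256 words.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	int base_shift = 6;	/* assumed NVM_WORD_SIZE_BASE_SHIFT */
          
          	for (int field = 0; field <= 3; field++)
          		printf("EE_SIZE_EX=%d -> %d words\n",
          		    field, 1 << (field + base_shift));
          	return 0;
          }
          #endif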
   13003 
   13004 /*
   13005  * wm_nvm_ready_spi:
   13006  *
   13007  *	Wait for a SPI EEPROM to be ready for commands.
   13008  */
   13009 static int
   13010 wm_nvm_ready_spi(struct wm_softc *sc)
   13011 {
   13012 	uint32_t val;
   13013 	int usec;
   13014 
   13015 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13016 		device_xname(sc->sc_dev), __func__));
   13017 
   13018 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13019 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13020 		wm_eeprom_recvbits(sc, &val, 8);
   13021 		if ((val & SPI_SR_RDY) == 0)
   13022 			break;
   13023 	}
   13024 	if (usec >= SPI_MAX_RETRIES) {
    13025 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13026 		return -1;
   13027 	}
   13028 	return 0;
   13029 }
   13030 
   13031 /*
   13032  * wm_nvm_read_spi:
   13033  *
    13034  *	Read word(s) from the EEPROM using the SPI protocol.
   13035  */
   13036 static int
   13037 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13038 {
   13039 	uint32_t reg, val;
   13040 	int i;
   13041 	uint8_t opc;
   13042 	int rv = 0;
   13043 
   13044 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13045 		device_xname(sc->sc_dev), __func__));
   13046 
   13047 	if (sc->nvm.acquire(sc) != 0)
   13048 		return -1;
   13049 
   13050 	/* Clear SK and CS. */
   13051 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13052 	CSR_WRITE(sc, WMREG_EECD, reg);
   13053 	CSR_WRITE_FLUSH(sc);
   13054 	delay(2);
   13055 
   13056 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13057 		goto out;
   13058 
   13059 	/* Toggle CS to flush commands. */
   13060 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13061 	CSR_WRITE_FLUSH(sc);
   13062 	delay(2);
   13063 	CSR_WRITE(sc, WMREG_EECD, reg);
   13064 	CSR_WRITE_FLUSH(sc);
   13065 	delay(2);
   13066 
   13067 	opc = SPI_OPC_READ;
   13068 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13069 		opc |= SPI_OPC_A8;
   13070 
   13071 	wm_eeprom_sendbits(sc, opc, 8);
   13072 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13073 
   13074 	for (i = 0; i < wordcnt; i++) {
   13075 		wm_eeprom_recvbits(sc, &val, 16);
   13076 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13077 	}
   13078 
   13079 	/* Raise CS and clear SK. */
   13080 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13081 	CSR_WRITE(sc, WMREG_EECD, reg);
   13082 	CSR_WRITE_FLUSH(sc);
   13083 	delay(2);
   13084 
   13085 out:
   13086 	sc->nvm.release(sc);
   13087 	return rv;
   13088 }
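
          /*
           * A minimal sketch (not driver code) of the byte swap above: the
           * SPI EEPROM shifts each word out most-significant byte first,
           * while NVM words are stored little-endian, so the two bytes are
           * exchanged before the word is returned.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	unsigned int val = 0xba12;	/* as shifted in from the EEPROM */
          	unsigned int word = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
          
          	printf("0x%04x -> 0x%04x\n", val, word);  /* 0xba12 -> 0x12ba */
          	return 0;
          }
          #endif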
   13089 
    13090 /* Reading via the EERD register */
   13091 
   13092 static int
   13093 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13094 {
   13095 	uint32_t attempts = 100000;
   13096 	uint32_t i, reg = 0;
   13097 	int32_t done = -1;
   13098 
   13099 	for (i = 0; i < attempts; i++) {
   13100 		reg = CSR_READ(sc, rw);
   13101 
   13102 		if (reg & EERD_DONE) {
   13103 			done = 0;
   13104 			break;
   13105 		}
   13106 		delay(5);
   13107 	}
   13108 
   13109 	return done;
   13110 }
   13111 
   13112 static int
   13113 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13114 {
   13115 	int i, eerd = 0;
   13116 	int rv = 0;
   13117 
   13118 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13119 		device_xname(sc->sc_dev), __func__));
   13120 
   13121 	if (sc->nvm.acquire(sc) != 0)
   13122 		return -1;
   13123 
   13124 	for (i = 0; i < wordcnt; i++) {
   13125 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13126 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13127 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13128 		if (rv != 0) {
   13129 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13130 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13131 			break;
   13132 		}
   13133 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13134 	}
   13135 
   13136 	sc->nvm.release(sc);
   13137 	return rv;
   13138 }
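
          /*
           * Sketch (not driver code) of the EERD transaction issued above:
           * the word address is packed into the address field, EERD_START
           * kicks off the read, and after EERD_DONE the data sits above
           * EERD_DATA_SHIFT.  The shift/bit values below are assumptions
           * standing in for the real EERD_* constants, used only to make
           * the bit packing concrete.
           */
          #if 0
          #include <stdio.h>
          
          #define MY_EERD_ADDR_SHIFT	2	/* assumed */
          #define MY_EERD_DATA_SHIFT	16	/* assumed */
          #define MY_EERD_START		0x01	/* assumed */
          
          int
          main(void)
          {
          	int offset = 3;
          	unsigned int eerd = (offset << MY_EERD_ADDR_SHIFT) | MY_EERD_START;
          
          	printf("write EERD=0x%08x, poll DONE, data = EERD >> %d\n",
          	    eerd, MY_EERD_DATA_SHIFT);
          	return 0;
          }
          #endif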
   13139 
   13140 /* Flash */
   13141 
   13142 static int
   13143 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13144 {
   13145 	uint32_t eecd;
   13146 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13147 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13148 	uint32_t nvm_dword = 0;
   13149 	uint8_t sig_byte = 0;
   13150 	int rv;
   13151 
   13152 	switch (sc->sc_type) {
   13153 	case WM_T_PCH_SPT:
   13154 	case WM_T_PCH_CNP:
   13155 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13156 		act_offset = ICH_NVM_SIG_WORD * 2;
   13157 
   13158 		/* Set bank to 0 in case flash read fails. */
   13159 		*bank = 0;
   13160 
   13161 		/* Check bank 0 */
   13162 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13163 		if (rv != 0)
   13164 			return rv;
   13165 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13166 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13167 			*bank = 0;
   13168 			return 0;
   13169 		}
   13170 
   13171 		/* Check bank 1 */
    13172 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13173 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13174 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13175 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13176 			*bank = 1;
   13177 			return 0;
   13178 		}
   13179 		aprint_error_dev(sc->sc_dev,
   13180 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13181 		return -1;
   13182 	case WM_T_ICH8:
   13183 	case WM_T_ICH9:
   13184 		eecd = CSR_READ(sc, WMREG_EECD);
   13185 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13186 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13187 			return 0;
   13188 		}
   13189 		/* FALLTHROUGH */
   13190 	default:
   13191 		/* Default to 0 */
   13192 		*bank = 0;
   13193 
   13194 		/* Check bank 0 */
   13195 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13196 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13197 			*bank = 0;
   13198 			return 0;
   13199 		}
   13200 
   13201 		/* Check bank 1 */
   13202 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13203 		    &sig_byte);
   13204 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13205 			*bank = 1;
   13206 			return 0;
   13207 		}
   13208 	}
   13209 
   13210 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13211 		device_xname(sc->sc_dev)));
   13212 	return -1;
   13213 }
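
          /*
           * Sketch (not driver code) of the signature test used above: a
           * bank is valid when the top two bits of its signature byte hold
           * the pattern 0b10.  The 0xc0 mask and 0x80 value are assumptions
           * standing in for ICH_NVM_VALID_SIG_MASK and ICH_NVM_SIG_VALUE.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	unsigned char sig_byte = 0xa5;	/* pretend signature byte */
          
          	if ((sig_byte & 0xc0) == 0x80)	/* assumed mask/value */
          		printf("bank valid\n");
          	else
          		printf("bank invalid\n");
          	return 0;
          }
          #endif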
   13214 
   13215 /******************************************************************************
   13216  * This function does initial flash setup so that a new read/write/erase cycle
   13217  * can be started.
   13218  *
   13219  * sc - The pointer to the hw structure
   13220  ****************************************************************************/
   13221 static int32_t
   13222 wm_ich8_cycle_init(struct wm_softc *sc)
   13223 {
   13224 	uint16_t hsfsts;
   13225 	int32_t error = 1;
   13226 	int32_t i     = 0;
   13227 
   13228 	if (sc->sc_type >= WM_T_PCH_SPT)
   13229 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13230 	else
   13231 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13232 
    13233 	/* Check the Flash Descriptor Valid bit in HW status */
   13234 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13235 		return error;
   13236 
    13237 	/* Clear FCERR and DAEL in HW status by writing ones */
    13239 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13240 
   13241 	if (sc->sc_type >= WM_T_PCH_SPT)
   13242 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13243 	else
   13244 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13245 
   13246 	/*
    13247 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13248 	 * check against in order to start a new cycle, or the FDONE bit
    13249 	 * should be changed in the hardware so that it is 1 after a
    13250 	 * hardware reset, which can then be used as an indication of
    13251 	 * whether a cycle is in progress or has been completed.  We
    13252 	 * should also have some software semaphore mechanism to guard
    13253 	 * FDONE or the cycle-in-progress bit so that accesses by two
    13254 	 * threads are serialized and two threads can't start a cycle at
    13255 	 * the same time.
   13255 	 */
   13256 
   13257 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13258 		/*
   13259 		 * There is no cycle running at present, so we can start a
   13260 		 * cycle
   13261 		 */
   13262 
   13263 		/* Begin by setting Flash Cycle Done. */
   13264 		hsfsts |= HSFSTS_DONE;
   13265 		if (sc->sc_type >= WM_T_PCH_SPT)
   13266 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13267 			    hsfsts & 0xffffUL);
   13268 		else
   13269 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13270 		error = 0;
   13271 	} else {
   13272 		/*
    13273 		 * Otherwise poll for some time so the current cycle has a
   13274 		 * chance to end before giving up.
   13275 		 */
   13276 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13277 			if (sc->sc_type >= WM_T_PCH_SPT)
   13278 				hsfsts = ICH8_FLASH_READ32(sc,
   13279 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13280 			else
   13281 				hsfsts = ICH8_FLASH_READ16(sc,
   13282 				    ICH_FLASH_HSFSTS);
   13283 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13284 				error = 0;
   13285 				break;
   13286 			}
   13287 			delay(1);
   13288 		}
   13289 		if (error == 0) {
   13290 			/*
    13291 			 * The previous cycle ended within the timeout;
    13292 			 * now set the Flash Cycle Done bit.
   13293 			 */
   13294 			hsfsts |= HSFSTS_DONE;
   13295 			if (sc->sc_type >= WM_T_PCH_SPT)
   13296 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13297 				    hsfsts & 0xffffUL);
   13298 			else
   13299 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13300 				    hsfsts);
   13301 		}
   13302 	}
   13303 	return error;
   13304 }
   13305 
   13306 /******************************************************************************
   13307  * This function starts a flash cycle and waits for its completion
   13308  *
   13309  * sc - The pointer to the hw structure
   13310  ****************************************************************************/
   13311 static int32_t
   13312 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13313 {
   13314 	uint16_t hsflctl;
   13315 	uint16_t hsfsts;
   13316 	int32_t error = 1;
   13317 	uint32_t i = 0;
   13318 
   13319 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13320 	if (sc->sc_type >= WM_T_PCH_SPT)
   13321 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13322 	else
   13323 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13324 	hsflctl |= HSFCTL_GO;
   13325 	if (sc->sc_type >= WM_T_PCH_SPT)
   13326 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13327 		    (uint32_t)hsflctl << 16);
   13328 	else
   13329 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13330 
   13331 	/* Wait till FDONE bit is set to 1 */
   13332 	do {
   13333 		if (sc->sc_type >= WM_T_PCH_SPT)
   13334 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13335 			    & 0xffffUL;
   13336 		else
   13337 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13338 		if (hsfsts & HSFSTS_DONE)
   13339 			break;
   13340 		delay(1);
   13341 		i++;
   13342 	} while (i < timeout);
    13343 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13344 		error = 0;
   13345 
   13346 	return error;
   13347 }
   13348 
   13349 /******************************************************************************
   13350  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13351  *
   13352  * sc - The pointer to the hw structure
   13353  * index - The index of the byte or word to read.
   13354  * size - Size of data to read, 1=byte 2=word, 4=dword
   13355  * data - Pointer to the word to store the value read.
   13356  *****************************************************************************/
   13357 static int32_t
   13358 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13359     uint32_t size, uint32_t *data)
   13360 {
   13361 	uint16_t hsfsts;
   13362 	uint16_t hsflctl;
   13363 	uint32_t flash_linear_address;
   13364 	uint32_t flash_data = 0;
   13365 	int32_t error = 1;
   13366 	int32_t count = 0;
   13367 
    13368 	if (size < 1 || size > 4 || data == NULL ||
   13369 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13370 		return error;
   13371 
   13372 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13373 	    sc->sc_ich8_flash_base;
   13374 
   13375 	do {
   13376 		delay(1);
   13377 		/* Steps */
   13378 		error = wm_ich8_cycle_init(sc);
   13379 		if (error)
   13380 			break;
   13381 
   13382 		if (sc->sc_type >= WM_T_PCH_SPT)
   13383 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13384 			    >> 16;
   13385 		else
   13386 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13387 		/* The BCOUNT field holds the byte count minus one (0 => 1 byte). */
   13388 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13389 		    & HSFCTL_BCOUNT_MASK;
   13390 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13391 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13392 			/*
    13393 			 * In SPT, this register is in the LAN memory space,
    13394 			 * not flash. Therefore, only 32 bit access is
    13395 			 * supported.
   13395 			 */
   13396 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13397 			    (uint32_t)hsflctl << 16);
   13398 		} else
   13399 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13400 
   13401 		/*
   13402 		 * Write the last 24 bits of index into Flash Linear address
   13403 		 * field in Flash Address
   13404 		 */
   13405 		/* TODO: TBD maybe check the index against the size of flash */
   13406 
   13407 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13408 
   13409 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13410 
   13411 		/*
    13412 		 * If FCERR is set, clear it and retry the whole sequence
    13413 		 * a few more times; otherwise read in (shift in) the
    13414 		 * Flash Data0 register, least significant byte first.
   13416 		 */
   13417 		if (error == 0) {
   13418 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13419 			if (size == 1)
   13420 				*data = (uint8_t)(flash_data & 0x000000FF);
   13421 			else if (size == 2)
   13422 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13423 			else if (size == 4)
   13424 				*data = (uint32_t)flash_data;
   13425 			break;
   13426 		} else {
   13427 			/*
   13428 			 * If we've gotten here, then things are probably
   13429 			 * completely hosed, but if the error condition is
   13430 			 * detected, it won't hurt to give it another try...
   13431 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13432 			 */
   13433 			if (sc->sc_type >= WM_T_PCH_SPT)
   13434 				hsfsts = ICH8_FLASH_READ32(sc,
   13435 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13436 			else
   13437 				hsfsts = ICH8_FLASH_READ16(sc,
   13438 				    ICH_FLASH_HSFSTS);
   13439 
   13440 			if (hsfsts & HSFSTS_ERR) {
   13441 				/* Repeat for some time before giving up. */
   13442 				continue;
   13443 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13444 				break;
   13445 		}
   13446 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13447 
   13448 	return error;
   13449 }
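
          /*
           * Worked example (a sketch, not driver code) of the BCOUNT
           * encoding noted above: the byte-count field is programmed with
           * size - 1, so a 4-byte (dword) access stores 3 in the field.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	for (unsigned int size = 1; size <= 4; size++)
          		printf("size %u -> BCOUNT field %u\n", size, size - 1);
          	return 0;
          }
          #endif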
   13450 
   13451 /******************************************************************************
   13452  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13453  *
   13454  * sc - pointer to wm_hw structure
   13455  * index - The index of the byte to read.
   13456  * data - Pointer to a byte to store the value read.
   13457  *****************************************************************************/
   13458 static int32_t
   13459 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13460 {
   13461 	int32_t status;
   13462 	uint32_t word = 0;
   13463 
   13464 	status = wm_read_ich8_data(sc, index, 1, &word);
   13465 	if (status == 0)
   13466 		*data = (uint8_t)word;
   13467 	else
   13468 		*data = 0;
   13469 
   13470 	return status;
   13471 }
   13472 
   13473 /******************************************************************************
   13474  * Reads a word from the NVM using the ICH8 flash access registers.
   13475  *
   13476  * sc - pointer to wm_hw structure
   13477  * index - The starting byte index of the word to read.
   13478  * data - Pointer to a word to store the value read.
   13479  *****************************************************************************/
   13480 static int32_t
   13481 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13482 {
   13483 	int32_t status;
   13484 	uint32_t word = 0;
   13485 
   13486 	status = wm_read_ich8_data(sc, index, 2, &word);
   13487 	if (status == 0)
   13488 		*data = (uint16_t)word;
   13489 	else
   13490 		*data = 0;
   13491 
   13492 	return status;
   13493 }
   13494 
   13495 /******************************************************************************
   13496  * Reads a dword from the NVM using the ICH8 flash access registers.
   13497  *
   13498  * sc - pointer to wm_hw structure
   13499  * index - The starting byte index of the word to read.
   13500  * data - Pointer to a word to store the value read.
   13501  *****************************************************************************/
   13502 static int32_t
   13503 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13504 {
   13505 	int32_t status;
   13506 
   13507 	status = wm_read_ich8_data(sc, index, 4, data);
   13508 	return status;
   13509 }
   13510 
   13511 /******************************************************************************
   13512  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13513  * register.
   13514  *
   13515  * sc - Struct containing variables accessed by shared code
   13516  * offset - offset of word in the EEPROM to read
   13517  * data - word read from the EEPROM
   13518  * words - number of words to read
   13519  *****************************************************************************/
   13520 static int
   13521 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13522 {
   13523 	int32_t	 rv = 0;
   13524 	uint32_t flash_bank = 0;
   13525 	uint32_t act_offset = 0;
   13526 	uint32_t bank_offset = 0;
   13527 	uint16_t word = 0;
   13528 	uint16_t i = 0;
   13529 
   13530 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13531 		device_xname(sc->sc_dev), __func__));
   13532 
   13533 	if (sc->nvm.acquire(sc) != 0)
   13534 		return -1;
   13535 
   13536 	/*
   13537 	 * We need to know which is the valid flash bank.  In the event
   13538 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13539 	 * managing flash_bank. So it cannot be trusted and needs
   13540 	 * to be updated with each read.
   13541 	 */
   13542 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13543 	if (rv) {
   13544 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13545 			device_xname(sc->sc_dev)));
   13546 		flash_bank = 0;
   13547 	}
   13548 
   13549 	/*
   13550 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13551 	 * size
   13552 	 */
   13553 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13554 
   13555 	for (i = 0; i < words; i++) {
   13556 		/* The NVM part needs a byte offset, hence * 2 */
   13557 		act_offset = bank_offset + ((offset + i) * 2);
   13558 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13559 		if (rv) {
   13560 			aprint_error_dev(sc->sc_dev,
   13561 			    "%s: failed to read NVM\n", __func__);
   13562 			break;
   13563 		}
   13564 		data[i] = word;
   13565 	}
   13566 
   13567 	sc->nvm.release(sc);
   13568 	return rv;
   13569 }
   13570 
   13571 /******************************************************************************
   13572  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13573  * register.
   13574  *
   13575  * sc - Struct containing variables accessed by shared code
   13576  * offset - offset of word in the EEPROM to read
   13577  * data - word read from the EEPROM
   13578  * words - number of words to read
   13579  *****************************************************************************/
   13580 static int
   13581 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13582 {
   13583 	int32_t	 rv = 0;
   13584 	uint32_t flash_bank = 0;
   13585 	uint32_t act_offset = 0;
   13586 	uint32_t bank_offset = 0;
   13587 	uint32_t dword = 0;
   13588 	uint16_t i = 0;
   13589 
   13590 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13591 		device_xname(sc->sc_dev), __func__));
   13592 
   13593 	if (sc->nvm.acquire(sc) != 0)
   13594 		return -1;
   13595 
   13596 	/*
   13597 	 * We need to know which is the valid flash bank.  In the event
   13598 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13599 	 * managing flash_bank. So it cannot be trusted and needs
   13600 	 * to be updated with each read.
   13601 	 */
   13602 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13603 	if (rv) {
   13604 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13605 			device_xname(sc->sc_dev)));
   13606 		flash_bank = 0;
   13607 	}
   13608 
   13609 	/*
   13610 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13611 	 * size
   13612 	 */
   13613 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13614 
   13615 	for (i = 0; i < words; i++) {
   13616 		/* The NVM part needs a byte offset, hence * 2 */
   13617 		act_offset = bank_offset + ((offset + i) * 2);
   13618 		/* but we must read dword aligned, so mask ... */
   13619 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13620 		if (rv) {
   13621 			aprint_error_dev(sc->sc_dev,
   13622 			    "%s: failed to read NVM\n", __func__);
   13623 			break;
   13624 		}
   13625 		/* ... and pick out low or high word */
   13626 		if ((act_offset & 0x2) == 0)
   13627 			data[i] = (uint16_t)(dword & 0xFFFF);
   13628 		else
   13629 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13630 	}
   13631 
   13632 	sc->nvm.release(sc);
   13633 	return rv;
   13634 }
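
          /*
           * Worked example (a sketch, not driver code) of the aligned read
           * above: NVM word 3 of bank 0 lives at byte offset 6, the dword
           * is fetched from byte offset 4, and because bit 1 of the byte
           * offset is set, the wanted word is the dword's high half.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	unsigned int act_offset = 3 * 2;	/* byte offset of word 3 */
          	unsigned int dword = 0xcafebabe;	/* pretend flash dword */
          	unsigned int data;
          
          	if ((act_offset & 0x2) == 0)
          		data = dword & 0xffff;
          	else
          		data = (dword >> 16) & 0xffff;
          
          	printf("dword read at %u, word = 0x%04x\n",
          	    act_offset & ~3U, data);	/* 4, 0xcafe */
          	return 0;
          }
          #endif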
   13635 
   13636 /* iNVM */
   13637 
   13638 static int
   13639 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13640 {
    13641 	int32_t	 rv = -1;	/* Default to "not found" so callers can apply defaults */
   13642 	uint32_t invm_dword;
   13643 	uint16_t i;
   13644 	uint8_t record_type, word_address;
   13645 
   13646 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13647 		device_xname(sc->sc_dev), __func__));
   13648 
   13649 	for (i = 0; i < INVM_SIZE; i++) {
   13650 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13651 		/* Get record type */
   13652 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13653 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13654 			break;
   13655 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13656 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13657 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13658 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13659 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13660 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13661 			if (word_address == address) {
   13662 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13663 				rv = 0;
   13664 				break;
   13665 			}
   13666 		}
   13667 	}
   13668 
   13669 	return rv;
   13670 }
   13671 
   13672 static int
   13673 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13674 {
   13675 	int rv = 0;
   13676 	int i;
   13677 
   13678 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13679 		device_xname(sc->sc_dev), __func__));
   13680 
   13681 	if (sc->nvm.acquire(sc) != 0)
   13682 		return -1;
   13683 
   13684 	for (i = 0; i < words; i++) {
   13685 		switch (offset + i) {
   13686 		case NVM_OFF_MACADDR:
   13687 		case NVM_OFF_MACADDR1:
   13688 		case NVM_OFF_MACADDR2:
   13689 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13690 			if (rv != 0) {
   13691 				data[i] = 0xffff;
   13692 				rv = -1;
   13693 			}
   13694 			break;
   13695 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13696 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13697 			if (rv != 0) {
   13698 				*data = INVM_DEFAULT_AL;
   13699 				rv = 0;
   13700 			}
   13701 			break;
   13702 		case NVM_OFF_CFG2:
   13703 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13704 			if (rv != 0) {
   13705 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13706 				rv = 0;
   13707 			}
   13708 			break;
   13709 		case NVM_OFF_CFG4:
   13710 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13711 			if (rv != 0) {
   13712 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13713 				rv = 0;
   13714 			}
   13715 			break;
   13716 		case NVM_OFF_LED_1_CFG:
   13717 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13718 			if (rv != 0) {
   13719 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13720 				rv = 0;
   13721 			}
   13722 			break;
   13723 		case NVM_OFF_LED_0_2_CFG:
   13724 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13725 			if (rv != 0) {
   13726 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13727 				rv = 0;
   13728 			}
   13729 			break;
   13730 		case NVM_OFF_ID_LED_SETTINGS:
   13731 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13732 			if (rv != 0) {
   13733 				*data = ID_LED_RESERVED_FFFF;
   13734 				rv = 0;
   13735 			}
   13736 			break;
   13737 		default:
   13738 			DPRINTF(sc, WM_DEBUG_NVM,
   13739 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13740 			*data = NVM_RESERVED_WORD;
   13741 			break;
   13742 		}
   13743 	}
   13744 
   13745 	sc->nvm.release(sc);
   13746 	return rv;
   13747 }
   13748 
    13749 /* Locking, NVM type detection, checksum validation, version check and read */
   13750 
   13751 static int
   13752 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13753 {
   13754 	uint32_t eecd = 0;
   13755 
   13756 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13757 	    || sc->sc_type == WM_T_82583) {
   13758 		eecd = CSR_READ(sc, WMREG_EECD);
   13759 
   13760 		/* Isolate bits 15 & 16 */
   13761 		eecd = ((eecd >> 15) & 0x03);
   13762 
   13763 		/* If both bits are set, device is Flash type */
   13764 		if (eecd == 0x03)
   13765 			return 0;
   13766 	}
   13767 	return 1;
   13768 }
   13769 
   13770 static int
   13771 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13772 {
   13773 	uint32_t eec;
   13774 
   13775 	eec = CSR_READ(sc, WMREG_EEC);
   13776 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13777 		return 1;
   13778 
   13779 	return 0;
   13780 }
   13781 
   13782 /*
   13783  * wm_nvm_validate_checksum
   13784  *
   13785  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13786  */
   13787 static int
   13788 wm_nvm_validate_checksum(struct wm_softc *sc)
   13789 {
   13790 	uint16_t checksum;
   13791 	uint16_t eeprom_data;
   13792 #ifdef WM_DEBUG
   13793 	uint16_t csum_wordaddr, valid_checksum;
   13794 #endif
   13795 	int i;
   13796 
   13797 	checksum = 0;
   13798 
   13799 	/* Don't check for I211 */
   13800 	if (sc->sc_type == WM_T_I211)
   13801 		return 0;
   13802 
   13803 #ifdef WM_DEBUG
   13804 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13805 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13806 		csum_wordaddr = NVM_OFF_COMPAT;
   13807 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13808 	} else {
   13809 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13810 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13811 	}
   13812 
   13813 	/* Dump EEPROM image for debug */
   13814 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13815 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13816 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13817 		/* XXX PCH_SPT? */
   13818 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13819 		if ((eeprom_data & valid_checksum) == 0)
   13820 			DPRINTF(sc, WM_DEBUG_NVM,
   13821 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13822 				device_xname(sc->sc_dev), eeprom_data,
   13823 				    valid_checksum));
   13824 	}
   13825 
   13826 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   13827 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13828 		for (i = 0; i < NVM_SIZE; i++) {
   13829 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13830 				printf("XXXX ");
   13831 			else
   13832 				printf("%04hx ", eeprom_data);
   13833 			if (i % 8 == 7)
   13834 				printf("\n");
   13835 		}
   13836 	}
   13837 
   13838 #endif /* WM_DEBUG */
   13839 
   13840 	for (i = 0; i < NVM_SIZE; i++) {
   13841 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13842 			return 1;
   13843 		checksum += eeprom_data;
   13844 	}
   13845 
   13846 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13847 #ifdef WM_DEBUG
   13848 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13849 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13850 #endif
   13851 	}
   13852 
   13853 	return 0;
   13854 }
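
          /*
           * A minimal sketch (not driver code) of the checksum rule above:
           * the 16-bit sum of the first NVM_SIZE words must equal
           * NVM_CHECKSUM.  The 64-word size and the 0xbaba magic below are
           * assumptions for illustration; images are generated so that the
           * words sum to the magic value.
           */
          #if 0
          #include <stdio.h>
          #include <stdint.h>
          
          int
          main(void)
          {
          	uint16_t nvm[64] = { 0 };	/* pretend NVM image */
          	uint16_t checksum = 0;
          
          	nvm[63] = 0xbaba;		/* checksum word balances the sum */
          	for (int i = 0; i < 64; i++)
          		checksum += nvm[i];
          
          	printf("checksum %s\n", (checksum == 0xbaba) ? "ok" : "bad");
          	return 0;
          }
          #endif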
   13855 
   13856 static void
   13857 wm_nvm_version_invm(struct wm_softc *sc)
   13858 {
   13859 	uint32_t dword;
   13860 
   13861 	/*
    13862 	 * Linux's code to decode the version is very strange, so we
    13863 	 * don't follow that algorithm and just use word 61 as the
    13864 	 * document describes.  Perhaps it's not perfect though...
   13865 	 *
   13866 	 * Example:
   13867 	 *
   13868 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13869 	 */
   13870 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13871 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13872 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13873 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13874 }
   13875 
   13876 static void
   13877 wm_nvm_version(struct wm_softc *sc)
   13878 {
   13879 	uint16_t major, minor, build, patch;
   13880 	uint16_t uid0, uid1;
   13881 	uint16_t nvm_data;
   13882 	uint16_t off;
   13883 	bool check_version = false;
   13884 	bool check_optionrom = false;
   13885 	bool have_build = false;
   13886 	bool have_uid = true;
   13887 
   13888 	/*
   13889 	 * Version format:
   13890 	 *
   13891 	 * XYYZ
   13892 	 * X0YZ
   13893 	 * X0YY
   13894 	 *
   13895 	 * Example:
   13896 	 *
   13897 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13898 	 *	82571	0x50a6	5.10.6?
   13899 	 *	82572	0x506a	5.6.10?
   13900 	 *	82572EI	0x5069	5.6.9?
   13901 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13902 	 *		0x2013	2.1.3?
   13903 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13904 	 * ICH8+82567	0x0040	0.4.0?
   13905 	 * ICH9+82566	0x1040	1.4.0?
   13906 	 *ICH10+82567	0x0043	0.4.3?
   13907 	 *  PCH+82577	0x00c1	0.12.1?
   13908 	 * PCH2+82579	0x00d3	0.13.3?
   13909 	 *		0x00d4	0.13.4?
   13910 	 *  LPT+I218	0x0023	0.2.3?
   13911 	 *  SPT+I219	0x0084	0.8.4?
   13912 	 *  CNP+I219	0x0054	0.5.4?
   13913 	 */
   13914 
   13915 	/*
   13916 	 * XXX
   13917 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    13918 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13919 	 */
   13920 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13921 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13922 		have_uid = false;
   13923 
   13924 	switch (sc->sc_type) {
   13925 	case WM_T_82571:
   13926 	case WM_T_82572:
   13927 	case WM_T_82574:
   13928 	case WM_T_82583:
   13929 		check_version = true;
   13930 		check_optionrom = true;
   13931 		have_build = true;
   13932 		break;
   13933 	case WM_T_ICH8:
   13934 	case WM_T_ICH9:
   13935 	case WM_T_ICH10:
   13936 	case WM_T_PCH:
   13937 	case WM_T_PCH2:
   13938 	case WM_T_PCH_LPT:
   13939 	case WM_T_PCH_SPT:
   13940 	case WM_T_PCH_CNP:
   13941 		check_version = true;
   13942 		have_build = true;
   13943 		have_uid = false;
   13944 		break;
   13945 	case WM_T_82575:
   13946 	case WM_T_82576:
   13947 	case WM_T_82580:
   13948 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13949 			check_version = true;
   13950 		break;
   13951 	case WM_T_I211:
   13952 		wm_nvm_version_invm(sc);
   13953 		have_uid = false;
   13954 		goto printver;
   13955 	case WM_T_I210:
   13956 		if (!wm_nvm_flash_presence_i210(sc)) {
   13957 			wm_nvm_version_invm(sc);
   13958 			have_uid = false;
   13959 			goto printver;
   13960 		}
   13961 		/* FALLTHROUGH */
   13962 	case WM_T_I350:
   13963 	case WM_T_I354:
   13964 		check_version = true;
   13965 		check_optionrom = true;
   13966 		break;
   13967 	default:
   13968 		return;
   13969 	}
   13970 	if (check_version
   13971 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13972 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13973 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13974 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13975 			build = nvm_data & NVM_BUILD_MASK;
   13976 			have_build = true;
   13977 		} else
   13978 			minor = nvm_data & 0x00ff;
   13979 
    13980 		/* Minor is BCD-ish: treat its hex nibbles as decimal digits. */
   13981 		minor = (minor / 16) * 10 + (minor % 16);
   13982 		sc->sc_nvm_ver_major = major;
   13983 		sc->sc_nvm_ver_minor = minor;
   13984 
   13985 printver:
   13986 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13987 		    sc->sc_nvm_ver_minor);
   13988 		if (have_build) {
   13989 			sc->sc_nvm_ver_build = build;
   13990 			aprint_verbose(".%d", build);
   13991 		}
   13992 	}
   13993 
    13994 	/* Assume the Option ROM area is above NVM_SIZE */
   13995 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13996 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13997 		/* Option ROM Version */
   13998 		if ((off != 0x0000) && (off != 0xffff)) {
   13999 			int rv;
   14000 
   14001 			off += NVM_COMBO_VER_OFF;
   14002 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14003 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14004 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14005 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14006 				/* 16bits */
   14007 				major = uid0 >> 8;
   14008 				build = (uid0 << 8) | (uid1 >> 8);
   14009 				patch = uid1 & 0x00ff;
   14010 				aprint_verbose(", option ROM Version %d.%d.%d",
   14011 				    major, build, patch);
   14012 			}
   14013 		}
   14014 	}
   14015 
   14016 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14017 		aprint_verbose(", Image Unique ID %08x",
   14018 		    ((uint32_t)uid1 << 16) | uid0);
   14019 }
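
          /*
           * Worked example (a sketch, not driver code) decoding 82571's
           * 0x50a2 from the table above: high nibble = major, middle byte =
           * minor (hex nibbles read as decimal digits), low nibble = build,
           * giving "5.10.2".  The masks and shifts are assumptions modelled
           * on NVM_MAJOR_MASK/NVM_MINOR_MASK/NVM_BUILD_MASK.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	unsigned int nvm_data = 0x50a2;
          	unsigned int major = (nvm_data & 0xf000) >> 12; /* assumed */
          	unsigned int minor = (nvm_data & 0x0ff0) >> 4;	/* assumed */
          	unsigned int build = nvm_data & 0x000f;		/* assumed */
          
          	/* Treat the minor's hex nibbles as decimal digits. */
          	minor = (minor / 16) * 10 + (minor % 16);
          	printf("version %u.%u.%u\n", major, minor, build); /* 5.10.2 */
          	return 0;
          }
          #endif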
   14020 
   14021 /*
   14022  * wm_nvm_read:
   14023  *
   14024  *	Read data from the serial EEPROM.
   14025  */
   14026 static int
   14027 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14028 {
   14029 	int rv;
   14030 
   14031 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14032 		device_xname(sc->sc_dev), __func__));
   14033 
   14034 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14035 		return -1;
   14036 
   14037 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14038 
   14039 	return rv;
   14040 }
   14041 
   14042 /*
   14043  * Hardware semaphores.
    14044  * Very complex...
   14045  */
   14046 
   14047 static int
   14048 wm_get_null(struct wm_softc *sc)
   14049 {
   14050 
   14051 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14052 		device_xname(sc->sc_dev), __func__));
   14053 	return 0;
   14054 }
   14055 
   14056 static void
   14057 wm_put_null(struct wm_softc *sc)
   14058 {
   14059 
   14060 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14061 		device_xname(sc->sc_dev), __func__));
   14062 	return;
   14063 }
   14064 
   14065 static int
   14066 wm_get_eecd(struct wm_softc *sc)
   14067 {
   14068 	uint32_t reg;
   14069 	int x;
   14070 
   14071 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14072 		device_xname(sc->sc_dev), __func__));
   14073 
   14074 	reg = CSR_READ(sc, WMREG_EECD);
   14075 
   14076 	/* Request EEPROM access. */
   14077 	reg |= EECD_EE_REQ;
   14078 	CSR_WRITE(sc, WMREG_EECD, reg);
   14079 
   14080 	/* ..and wait for it to be granted. */
   14081 	for (x = 0; x < 1000; x++) {
   14082 		reg = CSR_READ(sc, WMREG_EECD);
   14083 		if (reg & EECD_EE_GNT)
   14084 			break;
   14085 		delay(5);
   14086 	}
   14087 	if ((reg & EECD_EE_GNT) == 0) {
   14088 		aprint_error_dev(sc->sc_dev,
   14089 		    "could not acquire EEPROM GNT\n");
   14090 		reg &= ~EECD_EE_REQ;
   14091 		CSR_WRITE(sc, WMREG_EECD, reg);
   14092 		return -1;
   14093 	}
   14094 
   14095 	return 0;
   14096 }
   14097 
   14098 static void
   14099 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14100 {
   14101 
   14102 	*eecd |= EECD_SK;
   14103 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14104 	CSR_WRITE_FLUSH(sc);
   14105 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14106 		delay(1);
   14107 	else
   14108 		delay(50);
   14109 }
   14110 
   14111 static void
   14112 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14113 {
   14114 
   14115 	*eecd &= ~EECD_SK;
   14116 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14117 	CSR_WRITE_FLUSH(sc);
   14118 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14119 		delay(1);
   14120 	else
   14121 		delay(50);
   14122 }
   14123 
   14124 static void
   14125 wm_put_eecd(struct wm_softc *sc)
   14126 {
   14127 	uint32_t reg;
   14128 
   14129 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14130 		device_xname(sc->sc_dev), __func__));
   14131 
   14132 	/* Stop nvm */
   14133 	reg = CSR_READ(sc, WMREG_EECD);
   14134 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14135 		/* Pull CS high */
   14136 		reg |= EECD_CS;
   14137 		wm_nvm_eec_clock_lower(sc, &reg);
   14138 	} else {
   14139 		/* CS on Microwire is active-high */
   14140 		reg &= ~(EECD_CS | EECD_DI);
   14141 		CSR_WRITE(sc, WMREG_EECD, reg);
   14142 		wm_nvm_eec_clock_raise(sc, &reg);
   14143 		wm_nvm_eec_clock_lower(sc, &reg);
   14144 	}
   14145 
   14146 	reg = CSR_READ(sc, WMREG_EECD);
   14147 	reg &= ~EECD_EE_REQ;
   14148 	CSR_WRITE(sc, WMREG_EECD, reg);
   14149 
   14150 	return;
   14151 }
   14152 
   14153 /*
   14154  * Get hardware semaphore.
   14155  * Same as e1000_get_hw_semaphore_generic()
   14156  */
   14157 static int
   14158 wm_get_swsm_semaphore(struct wm_softc *sc)
   14159 {
   14160 	int32_t timeout;
   14161 	uint32_t swsm;
   14162 
   14163 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14164 		device_xname(sc->sc_dev), __func__));
   14165 	KASSERT(sc->sc_nvm_wordsize > 0);
   14166 
   14167 retry:
   14168 	/* Get the SW semaphore. */
   14169 	timeout = sc->sc_nvm_wordsize + 1;
   14170 	while (timeout) {
   14171 		swsm = CSR_READ(sc, WMREG_SWSM);
   14172 
   14173 		if ((swsm & SWSM_SMBI) == 0)
   14174 			break;
   14175 
   14176 		delay(50);
   14177 		timeout--;
   14178 	}
   14179 
   14180 	if (timeout == 0) {
   14181 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14182 			/*
   14183 			 * In rare circumstances, the SW semaphore may already
   14184 			 * be held unintentionally. Clear the semaphore once
   14185 			 * before giving up.
   14186 			 */
   14187 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14188 			wm_put_swsm_semaphore(sc);
   14189 			goto retry;
   14190 		}
   14191 		aprint_error_dev(sc->sc_dev,
   14192 		    "could not acquire SWSM SMBI\n");
   14193 		return 1;
   14194 	}
   14195 
   14196 	/* Get the FW semaphore. */
   14197 	timeout = sc->sc_nvm_wordsize + 1;
   14198 	while (timeout) {
   14199 		swsm = CSR_READ(sc, WMREG_SWSM);
   14200 		swsm |= SWSM_SWESMBI;
   14201 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14202 		/* If we managed to set the bit we got the semaphore. */
   14203 		swsm = CSR_READ(sc, WMREG_SWSM);
   14204 		if (swsm & SWSM_SWESMBI)
   14205 			break;
   14206 
   14207 		delay(50);
   14208 		timeout--;
   14209 	}
   14210 
   14211 	if (timeout == 0) {
   14212 		aprint_error_dev(sc->sc_dev,
   14213 		    "could not acquire SWSM SWESMBI\n");
   14214 		/* Release semaphores */
   14215 		wm_put_swsm_semaphore(sc);
   14216 		return 1;
   14217 	}
   14218 	return 0;
   14219 }
   14220 
   14221 /*
   14222  * Put hardware semaphore.
   14223  * Same as e1000_put_hw_semaphore_generic()
   14224  */
   14225 static void
   14226 wm_put_swsm_semaphore(struct wm_softc *sc)
   14227 {
   14228 	uint32_t swsm;
   14229 
   14230 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14231 		device_xname(sc->sc_dev), __func__));
   14232 
   14233 	swsm = CSR_READ(sc, WMREG_SWSM);
   14234 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14235 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14236 }
   14237 
   14238 /*
   14239  * Get SW/FW semaphore.
   14240  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14241  */
   14242 static int
   14243 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14244 {
   14245 	uint32_t swfw_sync;
   14246 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14247 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14248 	int timeout;
   14249 
   14250 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14251 		device_xname(sc->sc_dev), __func__));
   14252 
   14253 	if (sc->sc_type == WM_T_80003)
   14254 		timeout = 50;
   14255 	else
   14256 		timeout = 200;
   14257 
   14258 	while (timeout) {
   14259 		if (wm_get_swsm_semaphore(sc)) {
   14260 			aprint_error_dev(sc->sc_dev,
   14261 			    "%s: failed to get semaphore\n",
   14262 			    __func__);
   14263 			return 1;
   14264 		}
   14265 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14266 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14267 			swfw_sync |= swmask;
   14268 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14269 			wm_put_swsm_semaphore(sc);
   14270 			return 0;
   14271 		}
   14272 		wm_put_swsm_semaphore(sc);
   14273 		delay(5000);
   14274 		timeout--;
   14275 	}
   14276 	device_printf(sc->sc_dev,
   14277 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14278 	    mask, swfw_sync);
   14279 	return 1;
   14280 }
   14281 
   14282 static void
   14283 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14284 {
   14285 	uint32_t swfw_sync;
   14286 
   14287 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14288 		device_xname(sc->sc_dev), __func__));
   14289 
   14290 	while (wm_get_swsm_semaphore(sc) != 0)
   14291 		continue;
   14292 
   14293 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14294 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14295 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14296 
   14297 	wm_put_swsm_semaphore(sc);
   14298 }
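
          /*
           * Sketch (not driver code) of the SW_FW_SYNC layout the two
           * routines above rely on: a resource mask sits in the software
           * half when shifted by SWFW_SOFT_SHIFT and in the firmware half
           * when shifted by SWFW_FIRM_SHIFT, so a resource is free only
           * when both of its bits are clear.  The shift values 0 and 16
           * below are assumptions for illustration.
           */
          #if 0
          #include <stdio.h>
          
          int
          main(void)
          {
          	unsigned int mask = 0x0002;		/* some resource */
          	unsigned int swmask = mask << 0;	/* assumed SOFT_SHIFT */
          	unsigned int fwmask = mask << 16;	/* assumed FIRM_SHIFT */
          	unsigned int swfw_sync = 0x00020000;	/* FW holds the lock */
          
          	if ((swfw_sync & (swmask | fwmask)) == 0)
          		printf("free: claim with 0x%08x\n", swmask);
          	else
          		printf("busy: drop SWSM and retry\n");
          	return 0;
          }
          #endif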
   14299 
   14300 static int
   14301 wm_get_nvm_80003(struct wm_softc *sc)
   14302 {
   14303 	int rv;
   14304 
   14305 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14306 		device_xname(sc->sc_dev), __func__));
   14307 
   14308 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14309 		aprint_error_dev(sc->sc_dev,
   14310 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14311 		return rv;
   14312 	}
   14313 
   14314 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14315 	    && (rv = wm_get_eecd(sc)) != 0) {
   14316 		aprint_error_dev(sc->sc_dev,
   14317 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14318 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14319 		return rv;
   14320 	}
   14321 
   14322 	return 0;
   14323 }
   14324 
   14325 static void
   14326 wm_put_nvm_80003(struct wm_softc *sc)
   14327 {
   14328 
   14329 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14330 		device_xname(sc->sc_dev), __func__));
   14331 
   14332 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14333 		wm_put_eecd(sc);
   14334 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14335 }
   14336 
   14337 static int
   14338 wm_get_nvm_82571(struct wm_softc *sc)
   14339 {
   14340 	int rv;
   14341 
   14342 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14343 		device_xname(sc->sc_dev), __func__));
   14344 
   14345 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14346 		return rv;
   14347 
   14348 	switch (sc->sc_type) {
   14349 	case WM_T_82573:
   14350 		break;
   14351 	default:
   14352 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14353 			rv = wm_get_eecd(sc);
   14354 		break;
   14355 	}
   14356 
   14357 	if (rv != 0) {
   14358 		aprint_error_dev(sc->sc_dev,
   14359 		    "%s: failed to get semaphore\n",
   14360 		    __func__);
   14361 		wm_put_swsm_semaphore(sc);
   14362 	}
   14363 
   14364 	return rv;
   14365 }
   14366 
   14367 static void
   14368 wm_put_nvm_82571(struct wm_softc *sc)
   14369 {
   14370 
   14371 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14372 		device_xname(sc->sc_dev), __func__));
   14373 
   14374 	switch (sc->sc_type) {
   14375 	case WM_T_82573:
   14376 		break;
   14377 	default:
   14378 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14379 			wm_put_eecd(sc);
   14380 		break;
   14381 	}
   14382 
   14383 	wm_put_swsm_semaphore(sc);
   14384 }
   14385 
   14386 static int
   14387 wm_get_phy_82575(struct wm_softc *sc)
   14388 {
   14389 
   14390 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14391 		device_xname(sc->sc_dev), __func__));
   14392 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14393 }
   14394 
   14395 static void
   14396 wm_put_phy_82575(struct wm_softc *sc)
   14397 {
   14398 
   14399 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14400 		device_xname(sc->sc_dev), __func__));
   14401 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14402 }
   14403 
   14404 static int
   14405 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14406 {
   14407 	uint32_t ext_ctrl;
    14408 	int timeout;
   14409 
   14410 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14411 		device_xname(sc->sc_dev), __func__));
   14412 
   14413 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14414 	for (timeout = 0; timeout < 200; timeout++) {
   14415 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14416 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14417 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14418 
   14419 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14420 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14421 			return 0;
   14422 		delay(5000);
   14423 	}
   14424 	device_printf(sc->sc_dev,
   14425 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14426 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14427 	return 1;
   14428 }
   14429 
   14430 static void
   14431 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14432 {
   14433 	uint32_t ext_ctrl;
   14434 
   14435 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14436 		device_xname(sc->sc_dev), __func__));
   14437 
   14438 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14439 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14440 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14441 
   14442 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14443 }
   14444 
   14445 static int
   14446 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14447 {
   14448 	uint32_t ext_ctrl;
   14449 	int timeout;
   14450 
   14451 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14452 		device_xname(sc->sc_dev), __func__));
   14453 	mutex_enter(sc->sc_ich_phymtx);
   14454 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14455 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14456 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14457 			break;
   14458 		delay(1000);
   14459 	}
   14460 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14461 		device_printf(sc->sc_dev,
   14462 		    "SW has already locked the resource\n");
   14463 		goto out;
   14464 	}
   14465 
   14466 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14467 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14468 	for (timeout = 0; timeout < 1000; timeout++) {
   14469 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14470 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14471 			break;
   14472 		delay(1000);
   14473 	}
   14474 	if (timeout >= 1000) {
   14475 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14476 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14477 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14478 		goto out;
   14479 	}
   14480 	return 0;
   14481 
   14482 out:
   14483 	mutex_exit(sc->sc_ich_phymtx);
   14484 	return 1;
   14485 }
   14486 
   14487 static void
   14488 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14489 {
   14490 	uint32_t ext_ctrl;
   14491 
   14492 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14493 		device_xname(sc->sc_dev), __func__));
   14494 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14495 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14496 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14497 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14498 	} else {
   14499 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14500 	}
   14501 
   14502 	mutex_exit(sc->sc_ich_phymtx);
   14503 }
   14504 
   14505 static int
   14506 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14507 {
   14508 
   14509 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14510 		device_xname(sc->sc_dev), __func__));
   14511 	mutex_enter(sc->sc_ich_nvmmtx);
   14512 
   14513 	return 0;
   14514 }
   14515 
   14516 static void
   14517 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14518 {
   14519 
   14520 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14521 		device_xname(sc->sc_dev), __func__));
   14522 	mutex_exit(sc->sc_ich_nvmmtx);
   14523 }
   14524 
   14525 static int
   14526 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14527 {
   14528 	int i = 0;
   14529 	uint32_t reg;
   14530 
   14531 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14532 		device_xname(sc->sc_dev), __func__));
   14533 
   14534 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14535 	do {
   14536 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14537 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14538 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14539 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14540 			break;
   14541 		delay(2*1000);
   14542 		i++;
   14543 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14544 
   14545 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14546 		wm_put_hw_semaphore_82573(sc);
   14547 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14548 		    device_xname(sc->sc_dev));
   14549 		return -1;
   14550 	}
   14551 
   14552 	return 0;
   14553 }
   14554 
   14555 static void
   14556 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14557 {
   14558 	uint32_t reg;
   14559 
   14560 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14561 		device_xname(sc->sc_dev), __func__));
   14562 
   14563 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14564 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14565 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14566 }
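
/*
 * Note on the 82573 acquire loop above: it retries every 2ms up to
 * WM_MDIO_OWNERSHIP_TIMEOUT times, so the worst-case wait is
 * WM_MDIO_OWNERSHIP_TIMEOUT * 2ms (the constant's value is defined
 * elsewhere in this driver).
 */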
   14567 
   14568 /*
   14569  * Management mode and power management related subroutines.
   14570  * BMC, AMT, suspend/resume and EEE.
   14571  */
   14572 
   14573 #ifdef WM_WOL
   14574 static int
   14575 wm_check_mng_mode(struct wm_softc *sc)
   14576 {
   14577 	int rv;
   14578 
   14579 	switch (sc->sc_type) {
   14580 	case WM_T_ICH8:
   14581 	case WM_T_ICH9:
   14582 	case WM_T_ICH10:
   14583 	case WM_T_PCH:
   14584 	case WM_T_PCH2:
   14585 	case WM_T_PCH_LPT:
   14586 	case WM_T_PCH_SPT:
   14587 	case WM_T_PCH_CNP:
   14588 		rv = wm_check_mng_mode_ich8lan(sc);
   14589 		break;
   14590 	case WM_T_82574:
   14591 	case WM_T_82583:
   14592 		rv = wm_check_mng_mode_82574(sc);
   14593 		break;
   14594 	case WM_T_82571:
   14595 	case WM_T_82572:
   14596 	case WM_T_82573:
   14597 	case WM_T_80003:
   14598 		rv = wm_check_mng_mode_generic(sc);
   14599 		break;
   14600 	default:
		/* Nothing to do */
   14602 		rv = 0;
   14603 		break;
   14604 	}
   14605 
   14606 	return rv;
   14607 }
   14608 
   14609 static int
   14610 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14611 {
   14612 	uint32_t fwsm;
   14613 
   14614 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14615 
   14616 	if (((fwsm & FWSM_FW_VALID) != 0)
   14617 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14618 		return 1;
   14619 
   14620 	return 0;
   14621 }
   14622 
   14623 static int
   14624 wm_check_mng_mode_82574(struct wm_softc *sc)
   14625 {
   14626 	uint16_t data;
   14627 
   14628 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14629 
   14630 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14631 		return 1;
   14632 
   14633 	return 0;
   14634 }
   14635 
   14636 static int
   14637 wm_check_mng_mode_generic(struct wm_softc *sc)
   14638 {
   14639 	uint32_t fwsm;
   14640 
   14641 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14642 
   14643 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14644 		return 1;
   14645 
   14646 	return 0;
   14647 }
   14648 #endif /* WM_WOL */
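
/*
 * The checks above extract the FWSM_MODE bitfield with __SHIFTOUT().
 * A sketch of its semantics:
 *
 *	fwsm = CSR_READ(sc, WMREG_FWSM);
 *	mode = __SHIFTOUT(fwsm, FWSM_MODE);
 *
 * __SHIFTOUT(x, mask) masks x and shifts the field down to bit 0, so
 * the result compares directly against the MNG_*_MODE constants.
 */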
   14649 
   14650 static int
   14651 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14652 {
   14653 	uint32_t manc, fwsm, factps;
   14654 
   14655 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14656 		return 0;
   14657 
   14658 	manc = CSR_READ(sc, WMREG_MANC);
   14659 
   14660 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14661 		device_xname(sc->sc_dev), manc));
   14662 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14663 		return 0;
   14664 
   14665 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14666 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14667 		factps = CSR_READ(sc, WMREG_FACTPS);
   14668 		if (((factps & FACTPS_MNGCG) == 0)
   14669 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14670 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   14672 		uint16_t data;
   14673 
   14674 		factps = CSR_READ(sc, WMREG_FACTPS);
   14675 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14676 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14677 			device_xname(sc->sc_dev), factps, data));
   14678 		if (((factps & FACTPS_MNGCG) == 0)
   14679 		    && ((data & NVM_CFG2_MNGM_MASK)
   14680 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14681 			return 1;
   14682 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14683 	    && ((manc & MANC_ASF_EN) == 0))
   14684 		return 1;
   14685 
   14686 	return 0;
   14687 }
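
/*
 * Summary of wm_enable_mng_pass_thru() above: pass-through is reported
 * only when ASF firmware is present and MANC_RECV_TCO_EN is set, and
 * then one of the following holds:
 *  - ARC subsystem valid: the manageability clock is not gated and the
 *    firmware is in ICH IAMT mode, or
 *  - 82574/82583: the clock is not gated and NVM CFG2 selects the
 *    pass-through (PT) management mode, or
 *  - otherwise: SMBus is enabled while ASF is disabled.
 */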
   14688 
   14689 static bool
   14690 wm_phy_resetisblocked(struct wm_softc *sc)
   14691 {
   14692 	bool blocked = false;
   14693 	uint32_t reg;
   14694 	int i = 0;
   14695 
   14696 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14697 		device_xname(sc->sc_dev), __func__));
   14698 
   14699 	switch (sc->sc_type) {
   14700 	case WM_T_ICH8:
   14701 	case WM_T_ICH9:
   14702 	case WM_T_ICH10:
   14703 	case WM_T_PCH:
   14704 	case WM_T_PCH2:
   14705 	case WM_T_PCH_LPT:
   14706 	case WM_T_PCH_SPT:
   14707 	case WM_T_PCH_CNP:
   14708 		do {
   14709 			reg = CSR_READ(sc, WMREG_FWSM);
   14710 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14711 				blocked = true;
   14712 				delay(10*1000);
   14713 				continue;
   14714 			}
   14715 			blocked = false;
   14716 		} while (blocked && (i++ < 30));
   14717 		return blocked;
   14719 	case WM_T_82571:
   14720 	case WM_T_82572:
   14721 	case WM_T_82573:
   14722 	case WM_T_82574:
   14723 	case WM_T_82583:
   14724 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   14731 	default:
   14732 		/* No problem */
   14733 		break;
   14734 	}
   14735 
   14736 	return false;
   14737 }
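
/*
 * On the ICH/PCH parts the loop above re-reads FWSM up to 30 times
 * with a 10ms delay, i.e. it waits roughly 300ms for firmware to
 * deassert the "reset blocked" indication before giving up.
 */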
   14738 
   14739 static void
   14740 wm_get_hw_control(struct wm_softc *sc)
   14741 {
   14742 	uint32_t reg;
   14743 
   14744 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14745 		device_xname(sc->sc_dev), __func__));
   14746 
   14747 	if (sc->sc_type == WM_T_82573) {
   14748 		reg = CSR_READ(sc, WMREG_SWSM);
   14749 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14750 	} else if (sc->sc_type >= WM_T_82571) {
   14751 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14752 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14753 	}
   14754 }
   14755 
   14756 static void
   14757 wm_release_hw_control(struct wm_softc *sc)
   14758 {
   14759 	uint32_t reg;
   14760 
   14761 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14762 		device_xname(sc->sc_dev), __func__));
   14763 
   14764 	if (sc->sc_type == WM_T_82573) {
   14765 		reg = CSR_READ(sc, WMREG_SWSM);
   14766 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14767 	} else if (sc->sc_type >= WM_T_82571) {
   14768 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14769 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14770 	}
   14771 }
   14772 
   14773 static void
   14774 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14775 {
   14776 	uint32_t reg;
   14777 
   14778 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14779 		device_xname(sc->sc_dev), __func__));
   14780 
   14781 	if (sc->sc_type < WM_T_PCH2)
   14782 		return;
   14783 
   14784 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14785 
   14786 	if (gate)
   14787 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14788 	else
   14789 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14790 
   14791 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14792 }
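
/*
 * Usage sketch for the gate helper above (this mirrors what
 * wm_init_phy_workarounds_pchlan() below actually does):
 *
 *	wm_gate_hw_phy_config_ich8lan(sc, true);	(gate)
 *	(... reset and configure the PHY ...)
 *	wm_gate_hw_phy_config_ich8lan(sc, false);	(ungate, non-managed)
 */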
   14793 
   14794 static int
   14795 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14796 {
   14797 	uint32_t fwsm, reg;
   14798 	int rv = 0;
   14799 
   14800 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14801 		device_xname(sc->sc_dev), __func__));
   14802 
   14803 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14804 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14805 
   14806 	/* Disable ULP */
   14807 	wm_ulp_disable(sc);
   14808 
   14809 	/* Acquire PHY semaphore */
   14810 	rv = sc->phy.acquire(sc);
   14811 	if (rv != 0) {
		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   14814 		return -1;
   14815 	}
   14816 
   14817 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14818 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14819 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14820 	 */
   14821 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14822 	switch (sc->sc_type) {
   14823 	case WM_T_PCH_LPT:
   14824 	case WM_T_PCH_SPT:
   14825 	case WM_T_PCH_CNP:
   14826 		if (wm_phy_is_accessible_pchlan(sc))
   14827 			break;
   14828 
   14829 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14830 		 * forcing MAC to SMBus mode first.
   14831 		 */
   14832 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14833 		reg |= CTRL_EXT_FORCE_SMBUS;
   14834 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14835 #if 0
   14836 		/* XXX Isn't this required??? */
   14837 		CSR_WRITE_FLUSH(sc);
   14838 #endif
   14839 		/* Wait 50 milliseconds for MAC to finish any retries
   14840 		 * that it might be trying to perform from previous
   14841 		 * attempts to acknowledge any phy read requests.
   14842 		 */
   14843 		delay(50 * 1000);
   14844 		/* FALLTHROUGH */
   14845 	case WM_T_PCH2:
   14846 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14847 			break;
   14848 		/* FALLTHROUGH */
	case WM_T_PCH:
		if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
			break;
   14853 
   14854 		if (wm_phy_resetisblocked(sc) == true) {
   14855 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14856 			break;
   14857 		}
   14858 
   14859 		/* Toggle LANPHYPC Value bit */
   14860 		wm_toggle_lanphypc_pch_lpt(sc);
   14861 
   14862 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14863 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14864 				break;
   14865 
   14866 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14867 			 * so ensure that the MAC is also out of SMBus mode
   14868 			 */
   14869 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14870 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14871 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14872 
   14873 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14874 				break;
   14875 			rv = -1;
   14876 		}
   14877 		break;
   14878 	default:
   14879 		break;
   14880 	}
   14881 
   14882 	/* Release semaphore */
   14883 	sc->phy.release(sc);
   14884 
   14885 	if (rv == 0) {
   14886 		/* Check to see if able to reset PHY.  Print error if not */
   14887 		if (wm_phy_resetisblocked(sc)) {
   14888 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14889 			goto out;
   14890 		}
   14891 
   14892 		/* Reset the PHY before any access to it.  Doing so, ensures
   14893 		 * that the PHY is in a known good state before we read/write
   14894 		 * PHY registers.  The generic reset is sufficient here,
   14895 		 * because we haven't determined the PHY type yet.
   14896 		 */
		if ((rv = wm_reset_phy(sc)) != 0)
			goto out;
   14899 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce,
		 * report it, as that is the condition the PHY is in.
		 */
   14906 		if (wm_phy_resetisblocked(sc))
			device_printf(sc->sc_dev, "XXX reset is blocked(5)\n");
   14908 	}
   14909 
   14910 out:
   14911 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14912 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14913 		delay(10*1000);
   14914 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14915 	}
   14916 
	return rv;
   14918 }
   14919 
   14920 static void
   14921 wm_init_manageability(struct wm_softc *sc)
   14922 {
   14923 
   14924 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14925 		device_xname(sc->sc_dev), __func__));
   14926 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14927 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14928 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14929 
   14930 		/* Disable hardware interception of ARP */
   14931 		manc &= ~MANC_ARP_EN;
   14932 
   14933 		/* Enable receiving management packets to the host */
   14934 		if (sc->sc_type >= WM_T_82571) {
   14935 			manc |= MANC_EN_MNG2HOST;
   14936 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14937 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14938 		}
   14939 
   14940 		CSR_WRITE(sc, WMREG_MANC, manc);
   14941 	}
   14942 }
   14943 
   14944 static void
   14945 wm_release_manageability(struct wm_softc *sc)
   14946 {
   14947 
   14948 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14949 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14950 
   14951 		manc |= MANC_ARP_EN;
   14952 		if (sc->sc_type >= WM_T_82571)
   14953 			manc &= ~MANC_EN_MNG2HOST;
   14954 
   14955 		CSR_WRITE(sc, WMREG_MANC, manc);
   14956 	}
   14957 }
   14958 
   14959 static void
   14960 wm_get_wakeup(struct wm_softc *sc)
   14961 {
   14962 
   14963 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14964 	switch (sc->sc_type) {
   14965 	case WM_T_82573:
   14966 	case WM_T_82583:
   14967 		sc->sc_flags |= WM_F_HAS_AMT;
   14968 		/* FALLTHROUGH */
   14969 	case WM_T_80003:
   14970 	case WM_T_82575:
   14971 	case WM_T_82576:
   14972 	case WM_T_82580:
   14973 	case WM_T_I350:
   14974 	case WM_T_I354:
   14975 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14976 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14977 		/* FALLTHROUGH */
   14978 	case WM_T_82541:
   14979 	case WM_T_82541_2:
   14980 	case WM_T_82547:
   14981 	case WM_T_82547_2:
   14982 	case WM_T_82571:
   14983 	case WM_T_82572:
   14984 	case WM_T_82574:
   14985 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14986 		break;
   14987 	case WM_T_ICH8:
   14988 	case WM_T_ICH9:
   14989 	case WM_T_ICH10:
   14990 	case WM_T_PCH:
   14991 	case WM_T_PCH2:
   14992 	case WM_T_PCH_LPT:
   14993 	case WM_T_PCH_SPT:
   14994 	case WM_T_PCH_CNP:
   14995 		sc->sc_flags |= WM_F_HAS_AMT;
   14996 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14997 		break;
   14998 	default:
   14999 		break;
   15000 	}
   15001 
   15002 	/* 1: HAS_MANAGE */
   15003 	if (wm_enable_mng_pass_thru(sc) != 0)
   15004 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15005 
	/*
	 * Note that the WOL flag is set after the EEPROM reset code
	 * has run.
	 */
   15010 }
   15011 
   15012 /*
   15013  * Unconfigure Ultra Low Power mode.
   15014  * Only for I217 and newer (see below).
   15015  */
   15016 static int
   15017 wm_ulp_disable(struct wm_softc *sc)
   15018 {
   15019 	uint32_t reg;
   15020 	uint16_t phyreg;
   15021 	int i = 0, rv = 0;
   15022 
   15023 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15024 		device_xname(sc->sc_dev), __func__));
   15025 	/* Exclude old devices */
   15026 	if ((sc->sc_type < WM_T_PCH_LPT)
   15027 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15028 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15029 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15030 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15031 		return 0;
   15032 
   15033 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15034 		/* Request ME un-configure ULP mode in the PHY */
   15035 		reg = CSR_READ(sc, WMREG_H2ME);
   15036 		reg &= ~H2ME_ULP;
   15037 		reg |= H2ME_ENFORCE_SETTINGS;
   15038 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15039 
   15040 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15041 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15042 			if (i++ == 30) {
   15043 				device_printf(sc->sc_dev, "%s timed out\n",
   15044 				    __func__);
   15045 				return -1;
   15046 			}
   15047 			delay(10 * 1000);
   15048 		}
   15049 		reg = CSR_READ(sc, WMREG_H2ME);
   15050 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15051 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15052 
   15053 		return 0;
   15054 	}
   15055 
   15056 	/* Acquire semaphore */
   15057 	rv = sc->phy.acquire(sc);
   15058 	if (rv != 0) {
		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   15061 		return -1;
   15062 	}
   15063 
   15064 	/* Toggle LANPHYPC */
   15065 	wm_toggle_lanphypc_pch_lpt(sc);
   15066 
   15067 	/* Unforce SMBus mode in PHY */
   15068 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15069 	if (rv != 0) {
   15070 		uint32_t reg2;
   15071 
   15072 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15073 			__func__);
   15074 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15075 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15076 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15077 		delay(50 * 1000);
   15078 
   15079 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15080 		    &phyreg);
   15081 		if (rv != 0)
   15082 			goto release;
   15083 	}
   15084 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15085 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15086 
   15087 	/* Unforce SMBus mode in MAC */
   15088 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15089 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15090 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15091 
   15092 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15093 	if (rv != 0)
   15094 		goto release;
   15095 	phyreg |= HV_PM_CTRL_K1_ENA;
   15096 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15097 
   15098 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15099 		&phyreg);
   15100 	if (rv != 0)
   15101 		goto release;
   15102 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15103 	    | I218_ULP_CONFIG1_STICKY_ULP
   15104 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15105 	    | I218_ULP_CONFIG1_WOL_HOST
   15106 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15107 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15108 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15109 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15110 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15111 	phyreg |= I218_ULP_CONFIG1_START;
   15112 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15113 
   15114 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15115 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15116 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15117 
   15118 release:
   15119 	/* Release semaphore */
   15120 	sc->phy.release(sc);
   15121 	wm_gmii_reset(sc);
   15122 	delay(50 * 1000);
   15123 
   15124 	return rv;
   15125 }
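
/*
 * Note the ordering in wm_ulp_disable() above: if valid firmware is
 * present, the ME is asked to leave ULP through H2ME and the function
 * returns early.  Only in the firmware-less case does the driver itself
 * toggle LANPHYPC, unforce SMBus mode in both the PHY and the MAC, and
 * rewrite I218_ULP_CONFIG1.
 */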
   15126 
   15127 /* WOL in the newer chipset interfaces (pchlan) */
   15128 static int
   15129 wm_enable_phy_wakeup(struct wm_softc *sc)
   15130 {
   15131 	device_t dev = sc->sc_dev;
   15132 	uint32_t mreg, moff;
   15133 	uint16_t wuce, wuc, wufc, preg;
   15134 	int i, rv;
   15135 
   15136 	KASSERT(sc->sc_type >= WM_T_PCH);
   15137 
   15138 	/* Copy MAC RARs to PHY RARs */
   15139 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15140 
   15141 	/* Activate PHY wakeup */
   15142 	rv = sc->phy.acquire(sc);
   15143 	if (rv != 0) {
   15144 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15145 		    __func__);
   15146 		return rv;
   15147 	}
   15148 
   15149 	/*
   15150 	 * Enable access to PHY wakeup registers.
   15151 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15152 	 */
   15153 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15154 	if (rv != 0) {
   15155 		device_printf(dev,
   15156 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15157 		goto release;
   15158 	}
   15159 
   15160 	/* Copy MAC MTA to PHY MTA */
   15161 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15162 		uint16_t lo, hi;
   15163 
   15164 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15165 		lo = (uint16_t)(mreg & 0xffff);
   15166 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15167 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15168 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15169 	}
   15170 
   15171 	/* Configure PHY Rx Control register */
   15172 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15173 	mreg = CSR_READ(sc, WMREG_RCTL);
   15174 	if (mreg & RCTL_UPE)
   15175 		preg |= BM_RCTL_UPE;
   15176 	if (mreg & RCTL_MPE)
   15177 		preg |= BM_RCTL_MPE;
   15178 	preg &= ~(BM_RCTL_MO_MASK);
   15179 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15180 	if (moff != 0)
   15181 		preg |= moff << BM_RCTL_MO_SHIFT;
   15182 	if (mreg & RCTL_BAM)
   15183 		preg |= BM_RCTL_BAM;
   15184 	if (mreg & RCTL_PMCF)
   15185 		preg |= BM_RCTL_PMCF;
   15186 	mreg = CSR_READ(sc, WMREG_CTRL);
   15187 	if (mreg & CTRL_RFCE)
   15188 		preg |= BM_RCTL_RFCE;
   15189 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15190 
   15191 	wuc = WUC_APME | WUC_PME_EN;
   15192 	wufc = WUFC_MAG;
   15193 	/* Enable PHY wakeup in MAC register */
   15194 	CSR_WRITE(sc, WMREG_WUC,
   15195 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15196 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15197 
   15198 	/* Configure and enable PHY wakeup in PHY registers */
   15199 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15200 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15201 
   15202 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15203 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15204 
   15205 release:
   15206 	sc->phy.release(sc);
   15207 
	return rv;
   15209 }
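
/*
 * The BM wakeup registers touched above live in BM_WUC_PAGE and are
 * reached through wm_access_phy_wakeup_reg_bm().  As inferred from the
 * calls in this function (not from its prototype), the fourth argument
 * selects the direction:
 *
 *	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &val, 1, true);  (read)
 *	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &val, 0, true);  (write)
 */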
   15210 
   15211 /* Power down workaround on D3 */
   15212 static void
   15213 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15214 {
   15215 	uint32_t reg;
   15216 	uint16_t phyreg;
   15217 	int i;
   15218 
   15219 	for (i = 0; i < 2; i++) {
   15220 		/* Disable link */
   15221 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15222 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15223 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15224 
   15225 		/*
   15226 		 * Call gig speed drop workaround on Gig disable before
   15227 		 * accessing any PHY registers
   15228 		 */
   15229 		if (sc->sc_type == WM_T_ICH8)
   15230 			wm_gig_downshift_workaround_ich8lan(sc);
   15231 
   15232 		/* Write VR power-down enable */
   15233 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15234 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15235 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15236 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15237 
   15238 		/* Read it back and test */
   15239 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15240 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15241 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15242 			break;
   15243 
   15244 		/* Issue PHY reset and repeat at most one more time */
   15245 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15246 	}
   15247 }
   15248 
   15249 /*
   15250  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15251  *  @sc: pointer to the HW structure
   15252  *
   15253  *  During S0 to Sx transition, it is possible the link remains at gig
   15254  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15255  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15256  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15257  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15258  *  needs to be written.
   15259  *  Parts that support (and are linked to a partner which support) EEE in
   15260  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15261  *  than 10Mbps w/o EEE.
   15262  */
   15263 static void
   15264 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15265 {
   15266 	device_t dev = sc->sc_dev;
   15267 	struct ethercom *ec = &sc->sc_ethercom;
   15268 	uint32_t phy_ctrl;
   15269 	int rv;
   15270 
   15271 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15272 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15273 
   15274 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15275 
   15276 	if (sc->sc_phytype == WMPHY_I217) {
   15277 		uint16_t devid = sc->sc_pcidevid;
   15278 
   15279 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15280 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15281 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15282 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15283 		    (sc->sc_type >= WM_T_PCH_SPT))
   15284 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15285 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15286 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15287 
   15288 		if (sc->phy.acquire(sc) != 0)
   15289 			goto out;
   15290 
   15291 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15292 			uint16_t eee_advert;
   15293 
   15294 			rv = wm_read_emi_reg_locked(dev,
   15295 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15296 			if (rv)
   15297 				goto release;
   15298 
   15299 			/*
   15300 			 * Disable LPLU if both link partners support 100BaseT
   15301 			 * EEE and 100Full is advertised on both ends of the
   15302 			 * link, and enable Auto Enable LPI since there will
   15303 			 * be no driver to enable LPI while in Sx.
   15304 			 */
   15305 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15306 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15307 				uint16_t anar, phy_reg;
   15308 
   15309 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15310 				    &anar);
   15311 				if (anar & ANAR_TX_FD) {
   15312 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15313 					    PHY_CTRL_NOND0A_LPLU);
   15314 
   15315 					/* Set Auto Enable LPI after link up */
   15316 					sc->phy.readreg_locked(dev, 2,
   15317 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15318 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15319 					sc->phy.writereg_locked(dev, 2,
   15320 					    I217_LPI_GPIO_CTRL, phy_reg);
   15321 				}
   15322 			}
   15323 		}
   15324 
   15325 		/*
   15326 		 * For i217 Intel Rapid Start Technology support,
   15327 		 * when the system is going into Sx and no manageability engine
   15328 		 * is present, the driver must configure proxy to reset only on
   15329 		 * power good.	LPI (Low Power Idle) state must also reset only
   15330 		 * on power good, as well as the MTA (Multicast table array).
   15331 		 * The SMBus release must also be disabled on LCD reset.
   15332 		 */
   15333 
   15334 		/*
   15335 		 * Enable MTA to reset for Intel Rapid Start Technology
   15336 		 * Support
   15337 		 */
   15338 
   15339 release:
   15340 		sc->phy.release(sc);
   15341 	}
   15342 out:
   15343 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15344 
   15345 	if (sc->sc_type == WM_T_ICH8)
   15346 		wm_gig_downshift_workaround_ich8lan(sc);
   15347 
   15348 	if (sc->sc_type >= WM_T_PCH) {
   15349 		wm_oem_bits_config_ich8lan(sc, false);
   15350 
   15351 		/* Reset PHY to activate OEM bits on 82577/8 */
   15352 		if (sc->sc_type == WM_T_PCH)
   15353 			wm_reset_phy(sc);
   15354 
   15355 		if (sc->phy.acquire(sc) != 0)
   15356 			return;
   15357 		wm_write_smbus_addr(sc);
   15358 		sc->phy.release(sc);
   15359 	}
   15360 }
   15361 
   15362 /*
   15363  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15364  *  @sc: pointer to the HW structure
   15365  *
   15366  *  During Sx to S0 transitions on non-managed devices or managed devices
   15367  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15368  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15369  *  the PHY.
   15370  *  On i217, setup Intel Rapid Start Technology.
   15371  */
   15372 static int
   15373 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15374 {
   15375 	device_t dev = sc->sc_dev;
   15376 	int rv;
   15377 
   15378 	if (sc->sc_type < WM_T_PCH2)
   15379 		return 0;
   15380 
   15381 	rv = wm_init_phy_workarounds_pchlan(sc);
   15382 	if (rv != 0)
   15383 		return -1;
   15384 
	/* For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   15390 	if (sc->sc_phytype == WMPHY_I217) {
   15391 		uint16_t phy_reg;
   15392 
   15393 		if (sc->phy.acquire(sc) != 0)
   15394 			return -1;
   15395 
   15396 		/* Clear Auto Enable LPI after link up */
   15397 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15398 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15399 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15400 
   15401 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15402 			/* Restore clear on SMB if no manageability engine
   15403 			 * is present
   15404 			 */
   15405 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15406 			    &phy_reg);
   15407 			if (rv != 0)
   15408 				goto release;
   15409 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15410 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15411 
   15412 			/* Disable Proxy */
   15413 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15414 		}
   15415 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15417 		if (rv != 0)
   15418 			goto release;
   15419 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15420 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15421 
   15422 release:
   15423 		sc->phy.release(sc);
   15424 		return rv;
   15425 	}
   15426 
   15427 	return 0;
   15428 }
   15429 
   15430 static void
   15431 wm_enable_wakeup(struct wm_softc *sc)
   15432 {
   15433 	uint32_t reg, pmreg;
   15434 	pcireg_t pmode;
   15435 	int rv = 0;
   15436 
   15437 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15438 		device_xname(sc->sc_dev), __func__));
   15439 
   15440 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15441 	    &pmreg, NULL) == 0)
   15442 		return;
   15443 
   15444 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15445 		goto pme;
   15446 
   15447 	/* Advertise the wakeup capability */
   15448 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15449 	    | CTRL_SWDPIN(3));
   15450 
   15451 	/* Keep the laser running on fiber adapters */
   15452 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15453 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15454 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15455 		reg |= CTRL_EXT_SWDPIN(3);
   15456 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15457 	}
   15458 
   15459 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15460 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15461 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15462 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15463 		wm_suspend_workarounds_ich8lan(sc);
   15464 
   15465 #if 0	/* For the multicast packet */
   15466 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15467 	reg |= WUFC_MC;
   15468 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15469 #endif
   15470 
   15471 	if (sc->sc_type >= WM_T_PCH) {
   15472 		rv = wm_enable_phy_wakeup(sc);
   15473 		if (rv != 0)
   15474 			goto pme;
   15475 	} else {
   15476 		/* Enable wakeup by the MAC */
   15477 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15478 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15479 	}
   15480 
   15481 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15482 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15483 		|| (sc->sc_type == WM_T_PCH2))
   15484 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15485 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15486 
   15487 pme:
   15488 	/* Request PME */
   15489 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15490 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15491 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15492 		/* For WOL */
   15493 		pmode |= PCI_PMCSR_PME_EN;
   15494 	} else {
   15495 		/* Disable WOL */
   15496 		pmode &= ~PCI_PMCSR_PME_EN;
   15497 	}
   15498 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15499 }
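
/*
 * PCI_PMCSR_PME_STS is write-1-to-clear, which is why the code above
 * always ORs it into pmode before writing the register back:
 *
 *	pmode |= PCI_PMCSR_PME_STS;	(acknowledge a stale event)
 *	pmode |= PCI_PMCSR_PME_EN;	(then (re)arm PME for WOL)
 *
 * Writing the register with the status bit set does not assert PME; it
 * only clears a previously latched event.
 */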
   15500 
   15501 /* Disable ASPM L0s and/or L1 for workaround */
   15502 static void
   15503 wm_disable_aspm(struct wm_softc *sc)
   15504 {
   15505 	pcireg_t reg, mask = 0;
	const char *str = "";
   15507 
	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
	 */
   15512 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15513 		return;
   15514 
   15515 	switch (sc->sc_type) {
   15516 	case WM_T_82571:
   15517 	case WM_T_82572:
   15518 		/*
   15519 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15520 		 * State Power management L1 State (ASPM L1).
   15521 		 */
   15522 		mask = PCIE_LCSR_ASPM_L1;
   15523 		str = "L1 is";
   15524 		break;
   15525 	case WM_T_82573:
   15526 	case WM_T_82574:
   15527 	case WM_T_82583:
   15528 		/*
   15529 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15530 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   15535 		 *
   15536 		 * References:
   15537 		 * Errata 8 of the Specification Update of i82573.
   15538 		 * Errata 20 of the Specification Update of i82574.
   15539 		 * Errata 9 of the Specification Update of i82583.
   15540 		 */
   15541 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15542 		str = "L0s and L1 are";
   15543 		break;
   15544 	default:
   15545 		return;
   15546 	}
   15547 
   15548 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15549 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15550 	reg &= ~mask;
   15551 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15552 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15553 
   15554 	/* Print only in wm_attach() */
   15555 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15556 		aprint_verbose_dev(sc->sc_dev,
   15557 		    "ASPM %s disabled to workaround the errata.\n", str);
   15558 }
   15559 
   15560 /* LPLU */
   15561 
   15562 static void
   15563 wm_lplu_d0_disable(struct wm_softc *sc)
   15564 {
   15565 	struct mii_data *mii = &sc->sc_mii;
   15566 	uint32_t reg;
   15567 	uint16_t phyval;
   15568 
   15569 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15570 		device_xname(sc->sc_dev), __func__));
   15571 
   15572 	if (sc->sc_phytype == WMPHY_IFE)
   15573 		return;
   15574 
   15575 	switch (sc->sc_type) {
   15576 	case WM_T_82571:
   15577 	case WM_T_82572:
   15578 	case WM_T_82573:
   15579 	case WM_T_82575:
   15580 	case WM_T_82576:
   15581 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15582 		phyval &= ~PMR_D0_LPLU;
   15583 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15584 		break;
   15585 	case WM_T_82580:
   15586 	case WM_T_I350:
   15587 	case WM_T_I210:
   15588 	case WM_T_I211:
   15589 		reg = CSR_READ(sc, WMREG_PHPM);
   15590 		reg &= ~PHPM_D0A_LPLU;
   15591 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15592 		break;
   15593 	case WM_T_82574:
   15594 	case WM_T_82583:
   15595 	case WM_T_ICH8:
   15596 	case WM_T_ICH9:
   15597 	case WM_T_ICH10:
   15598 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15599 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15600 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15601 		CSR_WRITE_FLUSH(sc);
   15602 		break;
   15603 	case WM_T_PCH:
   15604 	case WM_T_PCH2:
   15605 	case WM_T_PCH_LPT:
   15606 	case WM_T_PCH_SPT:
   15607 	case WM_T_PCH_CNP:
   15608 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15609 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15610 		if (wm_phy_resetisblocked(sc) == false)
   15611 			phyval |= HV_OEM_BITS_ANEGNOW;
   15612 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15613 		break;
   15614 	default:
   15615 		break;
   15616 	}
   15617 }
   15618 
   15619 /* EEE */
   15620 
   15621 static int
   15622 wm_set_eee_i350(struct wm_softc *sc)
   15623 {
   15624 	struct ethercom *ec = &sc->sc_ethercom;
   15625 	uint32_t ipcnfg, eeer;
   15626 	uint32_t ipcnfg_mask
   15627 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15628 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15629 
   15630 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15631 
   15632 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15633 	eeer = CSR_READ(sc, WMREG_EEER);
   15634 
   15635 	/* Enable or disable per user setting */
   15636 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15637 		ipcnfg |= ipcnfg_mask;
   15638 		eeer |= eeer_mask;
   15639 	} else {
   15640 		ipcnfg &= ~ipcnfg_mask;
   15641 		eeer &= ~eeer_mask;
   15642 	}
   15643 
   15644 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15645 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15646 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15647 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15648 
   15649 	return 0;
   15650 }
   15651 
   15652 static int
   15653 wm_set_eee_pchlan(struct wm_softc *sc)
   15654 {
   15655 	device_t dev = sc->sc_dev;
   15656 	struct ethercom *ec = &sc->sc_ethercom;
   15657 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15658 	int rv = 0;
   15659 
   15660 	switch (sc->sc_phytype) {
   15661 	case WMPHY_82579:
   15662 		lpa = I82579_EEE_LP_ABILITY;
   15663 		pcs_status = I82579_EEE_PCS_STATUS;
   15664 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15665 		break;
   15666 	case WMPHY_I217:
   15667 		lpa = I217_EEE_LP_ABILITY;
   15668 		pcs_status = I217_EEE_PCS_STATUS;
   15669 		adv_addr = I217_EEE_ADVERTISEMENT;
   15670 		break;
   15671 	default:
   15672 		return 0;
   15673 	}
   15674 
   15675 	if (sc->phy.acquire(sc)) {
   15676 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15677 		return 0;
   15678 	}
   15679 
   15680 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15681 	if (rv != 0)
   15682 		goto release;
   15683 
   15684 	/* Clear bits that enable EEE in various speeds */
   15685 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15686 
   15687 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15688 		/* Save off link partner's EEE ability */
   15689 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15690 		if (rv != 0)
   15691 			goto release;
   15692 
   15693 		/* Read EEE advertisement */
   15694 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15695 			goto release;
   15696 
   15697 		/*
   15698 		 * Enable EEE only for speeds in which the link partner is
   15699 		 * EEE capable and for which we advertise EEE.
   15700 		 */
   15701 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15702 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15703 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15704 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15705 			if ((data & ANLPAR_TX_FD) != 0)
   15706 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15707 			else {
   15708 				/*
   15709 				 * EEE is not supported in 100Half, so ignore
   15710 				 * partner's EEE in 100 ability if full-duplex
   15711 				 * is not advertised.
   15712 				 */
   15713 				sc->eee_lp_ability
   15714 				    &= ~AN_EEEADVERT_100_TX;
   15715 			}
   15716 		}
   15717 	}
   15718 
   15719 	if (sc->sc_phytype == WMPHY_82579) {
   15720 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15721 		if (rv != 0)
   15722 			goto release;
   15723 
   15724 		data &= ~I82579_LPI_PLL_SHUT_100;
   15725 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15726 	}
   15727 
   15728 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15729 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15730 		goto release;
   15731 
   15732 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15733 release:
   15734 	sc->phy.release(sc);
   15735 
   15736 	return rv;
   15737 }
   15738 
   15739 static int
   15740 wm_set_eee(struct wm_softc *sc)
   15741 {
   15742 	struct ethercom *ec = &sc->sc_ethercom;
   15743 
   15744 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15745 		return 0;
   15746 
   15747 	if (sc->sc_type == WM_T_I354) {
   15748 		/* I354 uses an external PHY */
   15749 		return 0; /* not yet */
   15750 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15751 		return wm_set_eee_i350(sc);
   15752 	else if (sc->sc_type >= WM_T_PCH2)
   15753 		return wm_set_eee_pchlan(sc);
   15754 
   15755 	return 0;
   15756 }
   15757 
   15758 /*
   15759  * Workarounds (mainly PHY related).
   15760  * Basically, PHY's workarounds are in the PHY drivers.
   15761  */
   15762 
   15763 /* Work-around for 82566 Kumeran PCS lock loss */
   15764 static int
   15765 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15766 {
   15767 	struct mii_data *mii = &sc->sc_mii;
   15768 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15769 	int i, reg, rv;
   15770 	uint16_t phyreg;
   15771 
   15772 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15773 		device_xname(sc->sc_dev), __func__));
   15774 
   15775 	/* If the link is not up, do nothing */
   15776 	if ((status & STATUS_LU) == 0)
   15777 		return 0;
   15778 
   15779 	/* Nothing to do if the link is other than 1Gbps */
   15780 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15781 		return 0;
   15782 
   15783 	for (i = 0; i < 10; i++) {
   15784 		/* read twice */
   15785 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15786 		if (rv != 0)
   15787 			return rv;
   15788 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15789 		if (rv != 0)
   15790 			return rv;
   15791 
   15792 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15793 			goto out;	/* GOOD! */
   15794 
   15795 		/* Reset the PHY */
   15796 		wm_reset_phy(sc);
   15797 		delay(5*1000);
   15798 	}
   15799 
   15800 	/* Disable GigE link negotiation */
   15801 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15802 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15803 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15804 
   15805 	/*
   15806 	 * Call gig speed drop workaround on Gig disable before accessing
   15807 	 * any PHY registers.
   15808 	 */
   15809 	wm_gig_downshift_workaround_ich8lan(sc);
   15810 
   15811 out:
   15812 	return 0;
   15813 }
   15814 
   15815 /*
   15816  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15817  *  @sc: pointer to the HW structure
   15818  *
   15819  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15820  *  LPLU, Gig disable, MDIC PHY reset):
   15821  *    1) Set Kumeran Near-end loopback
   15822  *    2) Clear Kumeran Near-end loopback
   15823  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15824  */
   15825 static void
   15826 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15827 {
   15828 	uint16_t kmreg;
   15829 
   15830 	/* Only for igp3 */
   15831 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15832 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15833 			return;
   15834 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15835 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15836 			return;
   15837 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15838 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15839 	}
   15840 }
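
/*
 * The workaround above is a set-then-clear pulse of the Kumeran
 * near-end loopback bit.  The return value of the final write is
 * ignored, as there is nothing useful to do on failure at that point.
 */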
   15841 
   15842 /*
   15843  * Workaround for pch's PHYs
   15844  * XXX should be moved to new PHY driver?
   15845  */
   15846 static int
   15847 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15848 {
   15849 	device_t dev = sc->sc_dev;
   15850 	struct mii_data *mii = &sc->sc_mii;
   15851 	struct mii_softc *child;
   15852 	uint16_t phy_data, phyrev = 0;
   15853 	int phytype = sc->sc_phytype;
   15854 	int rv;
   15855 
   15856 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15857 		device_xname(dev), __func__));
   15858 	KASSERT(sc->sc_type == WM_T_PCH);
   15859 
   15860 	/* Set MDIO slow mode before any other MDIO access */
   15861 	if (phytype == WMPHY_82577)
   15862 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15863 			return rv;
   15864 
   15865 	child = LIST_FIRST(&mii->mii_phys);
   15866 	if (child != NULL)
   15867 		phyrev = child->mii_mpd_rev;
   15868 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15870 	if ((child != NULL) &&
   15871 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15872 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15873 		/* Disable generation of early preamble (0x4431) */
   15874 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15875 		    &phy_data);
   15876 		if (rv != 0)
   15877 			return rv;
   15878 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15879 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15880 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15881 		    phy_data);
   15882 		if (rv != 0)
   15883 			return rv;
   15884 
   15885 		/* Preamble tuning for SSC */
   15886 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15887 		if (rv != 0)
   15888 			return rv;
   15889 	}
   15890 
   15891 	/* 82578 */
   15892 	if (phytype == WMPHY_82578) {
   15893 		/*
   15894 		 * Return registers to default by doing a soft reset then
   15895 		 * writing 0x3140 to the control register
   15896 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15897 		 */
   15898 		if ((child != NULL) && (phyrev < 2)) {
   15899 			PHY_RESET(child);
   15900 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15901 			if (rv != 0)
   15902 				return rv;
   15903 		}
   15904 	}
   15905 
   15906 	/* Select page 0 */
   15907 	if ((rv = sc->phy.acquire(sc)) != 0)
   15908 		return rv;
   15909 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   15910 	sc->phy.release(sc);
   15911 	if (rv != 0)
   15912 		return rv;
   15913 
   15914 	/*
   15915 	 * Configure the K1 Si workaround during phy reset assuming there is
   15916 	 * link so that it disables K1 if link is in 1Gbps.
   15917 	 */
   15918 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15919 		return rv;
   15920 
   15921 	/* Workaround for link disconnects on a busy hub in half duplex */
   15922 	rv = sc->phy.acquire(sc);
   15923 	if (rv)
   15924 		return rv;
   15925 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15926 	if (rv)
   15927 		goto release;
   15928 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15929 	    phy_data & 0x00ff);
   15930 	if (rv)
   15931 		goto release;
   15932 
   15933 	/* Set MSE higher to enable link to stay up when noise is high */
   15934 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15935 release:
   15936 	sc->phy.release(sc);
   15937 
   15938 	return rv;
   15939 }
   15940 
   15941 /*
   15942  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15943  *  @sc:   pointer to the HW structure
   15944  */
   15945 static void
   15946 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15947 {
   15948 
   15949 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15950 		device_xname(sc->sc_dev), __func__));
   15951 
   15952 	if (sc->phy.acquire(sc) != 0)
   15953 		return;
   15954 
   15955 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   15956 
   15957 	sc->phy.release(sc);
   15958 }
   15959 
   15960 static void
   15961 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   15962 {
   15963 	device_t dev = sc->sc_dev;
   15964 	uint32_t mac_reg;
   15965 	uint16_t i, wuce;
   15966 	int count;
   15967 
   15968 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15969 		device_xname(dev), __func__));
   15970 
   15971 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15972 		return;
   15973 
   15974 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15975 	count = wm_rar_count(sc);
   15976 	for (i = 0; i < count; i++) {
   15977 		uint16_t lo, hi;
   15978 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15979 		lo = (uint16_t)(mac_reg & 0xffff);
   15980 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15981 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15982 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15983 
   15984 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15985 		lo = (uint16_t)(mac_reg & 0xffff);
   15986 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15987 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15988 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15989 	}
   15990 
   15991 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15992 }
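
/*
 * Layout note for the copy above: each receive address occupies four
 * 16-bit PHY wakeup registers.  BM_RAR_L/BM_RAR_M take the low and
 * high halves of RAL, BM_RAR_H takes the low half of RAH, and
 * BM_RAR_CTRL takes the RAL_AV address-valid bit shifted down by 16.
 */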
   15993 
   15994 /*
   15995  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   15996  *  with 82579 PHY
   15997  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   15998  */
   15999 static int
   16000 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16001 {
   16002 	device_t dev = sc->sc_dev;
   16003 	int rar_count;
   16004 	int rv;
   16005 	uint32_t mac_reg;
   16006 	uint16_t dft_ctrl, data;
   16007 	uint16_t i;
   16008 
   16009 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16010 		device_xname(dev), __func__));
   16011 
   16012 	if (sc->sc_type < WM_T_PCH2)
   16013 		return 0;
   16014 
   16015 	/* Acquire PHY semaphore */
   16016 	rv = sc->phy.acquire(sc);
   16017 	if (rv != 0)
   16018 		return rv;
   16019 
   16020 	/* Disable Rx path while enabling/disabling workaround */
   16021 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16022 	if (rv != 0)
   16023 		goto out;
   16024 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16025 	    dft_ctrl | (1 << 14));
   16026 	if (rv != 0)
   16027 		goto out;
   16028 
   16029 	if (enable) {
   16030 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16031 		 * SHRAL/H) and initial CRC values to the MAC
   16032 		 */
   16033 		rar_count = wm_rar_count(sc);
   16034 		for (i = 0; i < rar_count; i++) {
   16035 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16036 			uint32_t addr_high, addr_low;
   16037 
   16038 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16039 			if (!(addr_high & RAL_AV))
   16040 				continue;
   16041 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16042 			mac_addr[0] = (addr_low & 0xFF);
   16043 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16044 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16045 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16046 			mac_addr[4] = (addr_high & 0xFF);
   16047 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16048 
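			/*
			 * Note: the value written is the bitwise complement
			 * of the little-endian CRC32 of the station address;
			 * RAICC holds the "initial CRC" mentioned in the
			 * comment above the loop.
			 */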
   16049 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16050 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16051 		}
   16052 
   16053 		/* Write Rx addresses to the PHY */
   16054 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16055 	}
   16056 
   16057 	/*
   16058 	 * If enable ==
   16059 	 *	true: Enable jumbo frame workaround in the MAC.
   16060 	 *	false: Write MAC register values back to h/w defaults.
   16061 	 */
   16062 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16063 	if (enable) {
   16064 		mac_reg &= ~(1 << 14);
   16065 		mac_reg |= (7 << 15);
   16066 	} else
   16067 		mac_reg &= ~(0xf << 14);
   16068 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16069 
   16070 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16071 	if (enable) {
   16072 		mac_reg |= RCTL_SECRC;
   16073 		sc->sc_rctl |= RCTL_SECRC;
   16074 		sc->sc_flags |= WM_F_CRC_STRIP;
   16075 	} else {
   16076 		mac_reg &= ~RCTL_SECRC;
   16077 		sc->sc_rctl &= ~RCTL_SECRC;
   16078 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16079 	}
   16080 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16081 
   16082 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16083 	if (rv != 0)
   16084 		goto out;
   16085 	if (enable)
   16086 		data |= 1 << 0;
   16087 	else
   16088 		data &= ~(1 << 0);
   16089 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16090 	if (rv != 0)
   16091 		goto out;
   16092 
   16093 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16094 	if (rv != 0)
   16095 		goto out;
	/*
	 * XXX FreeBSD and Linux do the same thing here: they set the same
	 * value in both the enable and the disable case. Is that correct?
	 */
   16100 	data &= ~(0xf << 8);
   16101 	data |= (0xb << 8);
   16102 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16103 	if (rv != 0)
   16104 		goto out;
   16105 
   16106 	/*
   16107 	 * If enable ==
   16108 	 *	true: Enable jumbo frame workaround in the PHY.
   16109 	 *	false: Write PHY register values back to h/w defaults.
   16110 	 */
   16111 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16112 	if (rv != 0)
   16113 		goto out;
   16114 	data &= ~(0x7F << 5);
   16115 	if (enable)
   16116 		data |= (0x37 << 5);
   16117 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16118 	if (rv != 0)
   16119 		goto out;
   16120 
   16121 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16122 	if (rv != 0)
   16123 		goto out;
   16124 	if (enable)
   16125 		data &= ~(1 << 13);
   16126 	else
   16127 		data |= (1 << 13);
   16128 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16129 	if (rv != 0)
   16130 		goto out;
   16131 
   16132 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16133 	if (rv != 0)
   16134 		goto out;
   16135 	data &= ~(0x3FF << 2);
   16136 	if (enable)
   16137 		data |= (I82579_TX_PTR_GAP << 2);
   16138 	else
   16139 		data |= (0x8 << 2);
   16140 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16141 	if (rv != 0)
   16142 		goto out;
   16143 
   16144 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16145 	    enable ? 0xf100 : 0x7e00);
   16146 	if (rv != 0)
   16147 		goto out;
   16148 
   16149 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16150 	if (rv != 0)
   16151 		goto out;
   16152 	if (enable)
   16153 		data |= 1 << 10;
   16154 	else
   16155 		data &= ~(1 << 10);
   16156 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16157 	if (rv != 0)
   16158 		goto out;
   16159 
   16160 	/* Re-enable Rx path after enabling/disabling workaround */
   16161 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16162 	    dft_ctrl & ~(1 << 14));
   16163 
   16164 out:
   16165 	sc->phy.release(sc);
   16166 
   16167 	return rv;
   16168 }
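
/*
 * Note that bit 14 of I82579_DFT_CTRL brackets the whole jumbo
 * workaround above: it is set first to stop the Rx path and cleared
 * again as the final PHY write, so the intermediate MAC, KMRN and PHY
 * updates are never applied while receive traffic is flowing.
 */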
   16169 
   16170 /*
   16171  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16172  *  done after every PHY reset.
   16173  */
   16174 static int
   16175 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16176 {
   16177 	device_t dev = sc->sc_dev;
   16178 	int rv;
   16179 
   16180 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16181 		device_xname(dev), __func__));
   16182 	KASSERT(sc->sc_type == WM_T_PCH2);
   16183 
   16184 	/* Set MDIO slow mode before any other MDIO access */
   16185 	rv = wm_set_mdio_slow_mode_hv(sc);
   16186 	if (rv != 0)
   16187 		return rv;
   16188 
   16189 	rv = sc->phy.acquire(sc);
   16190 	if (rv != 0)
   16191 		return rv;
   16192 	/* Set MSE higher to enable link to stay up when noise is high */
   16193 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16194 	if (rv != 0)
   16195 		goto release;
    16196 	/* Drop the link after the MSE threshold was reached 5 times */
   16197 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16198 release:
   16199 	sc->phy.release(sc);
   16200 
   16201 	return rv;
   16202 }
   16203 
   16204 /**
   16205  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16206  *  @link: link up bool flag
   16207  *
    16208  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
    16209  *  preventing further DMA write requests.  Work around the issue by disabling
    16210  *  the de-assertion of the clock request when in 1Gbps mode.
   16211  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16212  *  speeds in order to avoid Tx hangs.
   16213  **/
   16214 static int
   16215 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16216 {
   16217 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16218 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16219 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16220 	uint16_t phyreg;
   16221 
   16222 	if (link && (speed == STATUS_SPEED_1000)) {
   16223 		sc->phy.acquire(sc);
   16224 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16225 		    &phyreg);
   16226 		if (rv != 0)
   16227 			goto release;
   16228 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16229 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16230 		if (rv != 0)
   16231 			goto release;
   16232 		delay(20);
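          		/*
          		 * Keep the PLL clock requested while at 1Gbps so the
          		 * clock request is never de-asserted (see the function
          		 * comment above).
          		 */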
   16233 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16234 
   16235 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16236 		    &phyreg);
   16237 release:
   16238 		sc->phy.release(sc);
   16239 		return rv;
   16240 	}
   16241 
   16242 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16243 
   16244 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16245 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16246 	    || !link
   16247 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16248 		goto update_fextnvm6;
   16249 
   16250 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16251 
   16252 	/* Clear link status transmit timeout */
   16253 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16254 	if (speed == STATUS_SPEED_100) {
   16255 		/* Set inband Tx timeout to 5x10us for 100Half */
   16256 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16257 
   16258 		/* Do not extend the K1 entry latency for 100Half */
   16259 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16260 	} else {
   16261 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16262 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16263 
   16264 		/* Extend the K1 entry latency for 10 Mbps */
   16265 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16266 	}
   16267 
   16268 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16269 
   16270 update_fextnvm6:
   16271 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16272 	return 0;
   16273 }
   16274 
   16275 /*
   16276  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16277  *  @sc:   pointer to the HW structure
   16278  *  @link: link up bool flag
   16279  *
   16280  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    16281  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16282  *  If link is down, the function will restore the default K1 setting located
   16283  *  in the NVM.
   16284  */
   16285 static int
   16286 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16287 {
   16288 	int k1_enable = sc->sc_nvm_k1_enabled;
   16289 
   16290 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16291 		device_xname(sc->sc_dev), __func__));
   16292 
   16293 	if (sc->phy.acquire(sc) != 0)
   16294 		return -1;
   16295 
   16296 	if (link) {
   16297 		k1_enable = 0;
   16298 
   16299 		/* Link stall fix for link up */
   16300 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16301 		    0x0100);
   16302 	} else {
   16303 		/* Link stall fix for link down */
   16304 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16305 		    0x4100);
   16306 	}
   16307 
   16308 	wm_configure_k1_ich8lan(sc, k1_enable);
   16309 	sc->phy.release(sc);
   16310 
   16311 	return 0;
   16312 }
   16313 
   16314 /*
   16315  *  wm_k1_workaround_lv - K1 Si workaround
   16316  *  @sc:   pointer to the HW structure
   16317  *
    16318  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    16319  *  Disable K1 for 1000 and 100 speeds.
   16320  */
   16321 static int
   16322 wm_k1_workaround_lv(struct wm_softc *sc)
   16323 {
   16324 	uint32_t reg;
   16325 	uint16_t phyreg;
   16326 	int rv;
   16327 
   16328 	if (sc->sc_type != WM_T_PCH2)
   16329 		return 0;
   16330 
   16331 	/* Set K1 beacon duration based on 10Mbps speed */
   16332 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16333 	if (rv != 0)
   16334 		return rv;
   16335 
   16336 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16337 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16338 		if (phyreg &
   16339 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16340 			/* LV 1G/100 packet drop issue workaround */
   16341 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16342 			    &phyreg);
   16343 			if (rv != 0)
   16344 				return rv;
   16345 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16346 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16347 			    phyreg);
   16348 			if (rv != 0)
   16349 				return rv;
   16350 		} else {
   16351 			/* For 10Mbps */
   16352 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16353 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16354 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16355 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16356 		}
   16357 	}
   16358 
   16359 	return 0;
   16360 }
   16361 
   16362 /*
   16363  *  wm_link_stall_workaround_hv - Si workaround
   16364  *  @sc: pointer to the HW structure
   16365  *
   16366  *  This function works around a Si bug where the link partner can get
   16367  *  a link up indication before the PHY does. If small packets are sent
   16368  *  by the link partner they can be placed in the packet buffer without
    16369  *  being properly accounted for by the PHY and will stall, preventing
   16370  *  further packets from being received.  The workaround is to clear the
   16371  *  packet buffer after the PHY detects link up.
   16372  */
   16373 static int
   16374 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16375 {
   16376 	uint16_t phyreg;
   16377 
   16378 	if (sc->sc_phytype != WMPHY_82578)
   16379 		return 0;
   16380 
    16381 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
   16382 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16383 	if ((phyreg & BMCR_LOOP) != 0)
   16384 		return 0;
   16385 
   16386 	/* Check if link is up and at 1Gbps */
   16387 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16388 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16389 	    | BM_CS_STATUS_SPEED_MASK;
   16390 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16391 		| BM_CS_STATUS_SPEED_1000))
   16392 		return 0;
   16393 
   16394 	delay(200 * 1000);	/* XXX too big */
   16395 
   16396 	/* Flush the packets in the fifo buffer */
   16397 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16398 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16399 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16400 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16401 
   16402 	return 0;
   16403 }
   16404 
   16405 static int
   16406 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16407 {
   16408 	int rv;
   16409 	uint16_t reg;
   16410 
   16411 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16412 	if (rv != 0)
   16413 		return rv;
   16414 
   16415 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16416 	    reg | HV_KMRN_MDIO_SLOW);
   16417 }
   16418 
   16419 /*
   16420  *  wm_configure_k1_ich8lan - Configure K1 power state
   16421  *  @sc: pointer to the HW structure
   16422  *  @enable: K1 state to configure
   16423  *
   16424  *  Configure the K1 power state based on the provided parameter.
   16425  *  Assumes semaphore already acquired.
   16426  */
   16427 static void
   16428 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16429 {
   16430 	uint32_t ctrl, ctrl_ext, tmp;
   16431 	uint16_t kmreg;
   16432 	int rv;
   16433 
   16434 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16435 
   16436 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16437 	if (rv != 0)
   16438 		return;
   16439 
   16440 	if (k1_enable)
   16441 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16442 	else
   16443 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16444 
   16445 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16446 	if (rv != 0)
   16447 		return;
   16448 
   16449 	delay(20);
   16450 
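          	/*
          	 * Briefly force the MAC speed with the speed-select bypass
          	 * bit set, then restore the original values.  The Intel
          	 * reference drivers use the same sequence, apparently to make
          	 * the new K1 setting take effect.
          	 */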
   16451 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16452 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16453 
   16454 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16455 	tmp |= CTRL_FRCSPD;
   16456 
   16457 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16458 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16459 	CSR_WRITE_FLUSH(sc);
   16460 	delay(20);
   16461 
   16462 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16463 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16464 	CSR_WRITE_FLUSH(sc);
   16465 	delay(20);
   16466 
   16467 	return;
   16468 }
   16469 
    16470 /* Special case - for the 82575 we need to do manual init ... */
   16471 static void
   16472 wm_reset_init_script_82575(struct wm_softc *sc)
   16473 {
    16474 	/*
    16475 	 * Remark: this is untested code - we have no board without an EEPROM.
    16476 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    16477 	 */
   16478 
   16479 	/* SerDes configuration via SERDESCTRL */
   16480 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16481 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16482 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16483 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16484 
   16485 	/* CCM configuration via CCMCTL register */
   16486 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16487 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16488 
   16489 	/* PCIe lanes configuration */
   16490 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16491 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16492 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16493 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16494 
   16495 	/* PCIe PLL Configuration */
   16496 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16497 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16498 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16499 }
   16500 
   16501 static void
   16502 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16503 {
   16504 	uint32_t reg;
   16505 	uint16_t nvmword;
   16506 	int rv;
   16507 
   16508 	if (sc->sc_type != WM_T_82580)
   16509 		return;
   16510 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16511 		return;
   16512 
   16513 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16514 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16515 	if (rv != 0) {
   16516 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16517 		    __func__);
   16518 		return;
   16519 	}
   16520 
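          	/*
          	 * Mirror the NVM CFG3 MDIO bits into MDICNFG: DEST selects
          	 * the external MDIO interface, COM_MDIO the MDIO interface
          	 * shared between ports (per the 82580 register layout).
          	 */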
   16521 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16522 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16523 		reg |= MDICNFG_DEST;
   16524 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16525 		reg |= MDICNFG_COM_MDIO;
   16526 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16527 }
   16528 
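          /* A PHY ID of all zeros or all ones means the PHY did not respond. */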
   16529 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16530 
   16531 static bool
   16532 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16533 {
   16534 	uint32_t reg;
   16535 	uint16_t id1, id2;
   16536 	int i, rv;
   16537 
   16538 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16539 		device_xname(sc->sc_dev), __func__));
   16540 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16541 
   16542 	id1 = id2 = 0xffff;
   16543 	for (i = 0; i < 2; i++) {
   16544 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16545 		    &id1);
   16546 		if ((rv != 0) || MII_INVALIDID(id1))
   16547 			continue;
   16548 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16549 		    &id2);
   16550 		if ((rv != 0) || MII_INVALIDID(id2))
   16551 			continue;
   16552 		break;
   16553 	}
   16554 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16555 		goto out;
   16556 
   16557 	/*
   16558 	 * In case the PHY needs to be in mdio slow mode,
   16559 	 * set slow mode and try to get the PHY id again.
   16560 	 */
   16561 	rv = 0;
   16562 	if (sc->sc_type < WM_T_PCH_LPT) {
   16563 		sc->phy.release(sc);
   16564 		wm_set_mdio_slow_mode_hv(sc);
   16565 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16566 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16567 		sc->phy.acquire(sc);
   16568 	}
   16569 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16570 		device_printf(sc->sc_dev, "XXX return with false\n");
   16571 		return false;
   16572 	}
   16573 out:
   16574 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16575 		/* Only unforce SMBus if ME is not active */
   16576 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16577 			uint16_t phyreg;
   16578 
   16579 			/* Unforce SMBus mode in PHY */
   16580 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16581 			    CV_SMB_CTRL, &phyreg);
   16582 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16583 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16584 			    CV_SMB_CTRL, phyreg);
   16585 
   16586 			/* Unforce SMBus mode in MAC */
   16587 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16588 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16589 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16590 		}
   16591 	}
   16592 	return true;
   16593 }
   16594 
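          /*
           * Toggle the LANPHYPC pin to force a PHY power cycle so the PHY
           * comes back in a known state.  The 50ms config counter and the
           * post-toggle delays follow the Intel reference drivers.
           */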
   16595 static void
   16596 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16597 {
   16598 	uint32_t reg;
   16599 	int i;
   16600 
   16601 	/* Set PHY Config Counter to 50msec */
   16602 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16603 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16604 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16605 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16606 
   16607 	/* Toggle LANPHYPC */
   16608 	reg = CSR_READ(sc, WMREG_CTRL);
   16609 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16610 	reg &= ~CTRL_LANPHYPC_VALUE;
   16611 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16612 	CSR_WRITE_FLUSH(sc);
   16613 	delay(1000);
   16614 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16615 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16616 	CSR_WRITE_FLUSH(sc);
   16617 
   16618 	if (sc->sc_type < WM_T_PCH_LPT)
   16619 		delay(50 * 1000);
   16620 	else {
   16621 		i = 20;
   16622 
   16623 		do {
   16624 			delay(5 * 1000);
   16625 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16626 		    && i--);
   16627 
   16628 		delay(30 * 1000);
   16629 	}
   16630 }
   16631 
   16632 static int
   16633 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16634 {
   16635 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16636 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16637 	uint32_t rxa;
   16638 	uint16_t scale = 0, lat_enc = 0;
   16639 	int32_t obff_hwm = 0;
   16640 	int64_t lat_ns, value;
   16641 
   16642 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16643 		device_xname(sc->sc_dev), __func__));
   16644 
   16645 	if (link) {
   16646 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16647 		uint32_t status;
   16648 		uint16_t speed;
   16649 		pcireg_t preg;
   16650 
   16651 		status = CSR_READ(sc, WMREG_STATUS);
   16652 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16653 		case STATUS_SPEED_10:
   16654 			speed = 10;
   16655 			break;
   16656 		case STATUS_SPEED_100:
   16657 			speed = 100;
   16658 			break;
   16659 		case STATUS_SPEED_1000:
   16660 			speed = 1000;
   16661 			break;
   16662 		default:
   16663 			device_printf(sc->sc_dev, "Unknown speed "
   16664 			    "(status = %08x)\n", status);
   16665 			return -1;
   16666 		}
   16667 
   16668 		/* Rx Packet Buffer Allocation size (KB) */
   16669 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16670 
   16671 		/*
   16672 		 * Determine the maximum latency tolerated by the device.
   16673 		 *
   16674 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16675 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16676 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16677 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16678 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16679 		 */
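          		/*
          		 * Worked example with hypothetical numbers: for
          		 * lat_ns = 300000, the value does not fit in the
          		 * 10-bit field (LTRV_VALUE, presumably 0x3ff), so the
          		 * loop below divides by 2^5 per step:
          		 *	scale = 1, value = howmany(300000, 32) = 9375
          		 *	scale = 2, value = howmany(9375, 32) = 293
          		 * giving lat_enc = (2 << 10) | 293, i.e. a tolerated
          		 * latency of 293 * 2^10 ns (assuming LTRV_SCALE is the
          		 * field above the 10-bit value, as in the PCIe LTR
          		 * encoding).
          		 */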
   16680 		lat_ns = ((int64_t)rxa * 1024 -
   16681 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16682 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16683 		if (lat_ns < 0)
   16684 			lat_ns = 0;
   16685 		else
   16686 			lat_ns /= speed;
   16687 		value = lat_ns;
   16688 
   16689 		while (value > LTRV_VALUE) {
    16690 			scale++;
   16691 			value = howmany(value, __BIT(5));
   16692 		}
   16693 		if (scale > LTRV_SCALE_MAX) {
   16694 			device_printf(sc->sc_dev,
   16695 			    "Invalid LTR latency scale %d\n", scale);
   16696 			return -1;
   16697 		}
   16698 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16699 
   16700 		/* Determine the maximum latency tolerated by the platform */
   16701 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16702 		    WM_PCI_LTR_CAP_LPT);
   16703 		max_snoop = preg & 0xffff;
   16704 		max_nosnoop = preg >> 16;
   16705 
   16706 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16707 
   16708 		if (lat_enc > max_ltr_enc) {
   16709 			lat_enc = max_ltr_enc;
   16710 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16711 			    * PCI_LTR_SCALETONS(
   16712 				    __SHIFTOUT(lat_enc,
   16713 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16714 		}
   16715 
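          		/*
          		 * Convert the tolerated latency back into the amount
          		 * of Rx buffer that fills while waiting:
          		 * ns * (Mb/s * 1000) / 8 / 10^9 = KB.  The OBFF high
          		 * water mark is whatever remains of the Rx allocation.
          		 */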
   16716 		if (lat_ns) {
   16717 			lat_ns *= speed * 1000;
   16718 			lat_ns /= 8;
   16719 			lat_ns /= 1000000000;
   16720 			obff_hwm = (int32_t)(rxa - lat_ns);
   16721 		}
   16722 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16723 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    16724 			    "(rxa = %d, lat_ns = %d)\n",
   16725 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16726 			return -1;
   16727 		}
   16728 	}
   16729 	/* Snoop and No-Snoop latencies the same */
   16730 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16731 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16732 
   16733 	/* Set OBFF high water mark */
   16734 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16735 	reg |= obff_hwm;
   16736 	CSR_WRITE(sc, WMREG_SVT, reg);
   16737 
   16738 	/* Enable OBFF */
   16739 	reg = CSR_READ(sc, WMREG_SVCR);
   16740 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16741 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16742 
   16743 	return 0;
   16744 }
   16745 
   16746 /*
   16747  * I210 Errata 25 and I211 Errata 10
   16748  * Slow System Clock.
   16749  *
    16750  * Note that on NetBSD this function is called in both the FLASH and the iNVM case.
   16751  */
   16752 static int
   16753 wm_pll_workaround_i210(struct wm_softc *sc)
   16754 {
   16755 	uint32_t mdicnfg, wuc;
   16756 	uint32_t reg;
   16757 	pcireg_t pcireg;
   16758 	uint32_t pmreg;
   16759 	uint16_t nvmword, tmp_nvmword;
   16760 	uint16_t phyval;
   16761 	bool wa_done = false;
   16762 	int i, rv = 0;
   16763 
   16764 	/* Get Power Management cap offset */
   16765 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16766 	    &pmreg, NULL) == 0)
   16767 		return -1;
   16768 
   16769 	/* Save WUC and MDICNFG registers */
   16770 	wuc = CSR_READ(sc, WMREG_WUC);
   16771 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16772 
   16773 	reg = mdicnfg & ~MDICNFG_DEST;
   16774 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16775 
   16776 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16777 		/*
   16778 		 * The default value of the Initialization Control Word 1
   16779 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16780 		 */
   16781 		nvmword = INVM_DEFAULT_AL;
   16782 	}
   16783 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16784 
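          	/*
          	 * Retry loop: while the PHY PLL still reports unconfigured,
          	 * reset the internal PHY, bounce the device through D3hot with
          	 * the workaround autoload value in EEARBC, and check again.
          	 */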
   16785 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16786 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16787 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16788 
   16789 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16790 			rv = 0;
   16791 			break; /* OK */
   16792 		} else
   16793 			rv = -1;
   16794 
   16795 		wa_done = true;
   16796 		/* Directly reset the internal PHY */
   16797 		reg = CSR_READ(sc, WMREG_CTRL);
   16798 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16799 
   16800 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16801 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16802 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16803 
   16804 		CSR_WRITE(sc, WMREG_WUC, 0);
   16805 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16806 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16807 
   16808 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16809 		    pmreg + PCI_PMCSR);
   16810 		pcireg |= PCI_PMCSR_STATE_D3;
   16811 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16812 		    pmreg + PCI_PMCSR, pcireg);
   16813 		delay(1000);
   16814 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16815 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16816 		    pmreg + PCI_PMCSR, pcireg);
   16817 
   16818 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16819 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16820 
   16821 		/* Restore WUC register */
   16822 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16823 	}
   16824 
   16825 	/* Restore MDICNFG setting */
   16826 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16827 	if (wa_done)
   16828 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16829 	return rv;
   16830 }
   16831 
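          /*
           * Keep the side clock ungated and disable IOSF sideband clock
           * gating on SPT/CNP.  This mirrors a quirk in other drivers for
           * these parts so that legacy (INTx) interrupts are delivered
           * reliably; the exact failure mode being worked around is not
           * documented here.
           */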
   16832 static void
   16833 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16834 {
   16835 	uint32_t reg;
   16836 
   16837 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16838 		device_xname(sc->sc_dev), __func__));
   16839 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16840 	    || (sc->sc_type == WM_T_PCH_CNP));
   16841 
   16842 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16843 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16844 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16845 
   16846 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16847 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16848 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16849 }
   16850 
   16851 /* Sysctl function */
   16852 #ifdef WM_DEBUG
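          /*
           * Handler for the debug-flag sysctl node.  Standard NetBSD
           * pattern: copy the current value into a local, let
           * sysctl_lookup(9) read or update it, and commit the new value
           * only on a successful write.
           */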
   16853 static int
   16854 wm_sysctl_debug(SYSCTLFN_ARGS)
   16855 {
   16856 	struct sysctlnode node = *rnode;
   16857 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   16858 	uint32_t dflags;
   16859 	int error;
   16860 
   16861 	dflags = sc->sc_debug;
   16862 	node.sysctl_data = &dflags;
   16863 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   16864 
   16865 	if (error || newp == NULL)
   16866 		return error;
   16867 
   16868 	sc->sc_debug = dflags;
   16869 
   16870 	return 0;
   16871 }
   16872 #endif
   16873