/*	$NetBSD: if_wm.c,v 1.770 2023/05/11 06:59:31 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.770 2023/05/11 06:59:31 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
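
/*
 * Illustrative only: with WM_DEBUG compiled in and WM_DEBUG_LINK set in
 * sc->sc_debug, a call such as
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 *
 * prints its message; in a non-WM_DEBUG kernel the macro expands to
 * nothing.  Note the extra parentheses around the printf arguments,
 * which let the variadic list pass through a fixed-arity macro.
 */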

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
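
/*
 * Illustrative only: because the ring and job counts are powers of two,
 * the WM_NEXT*() macros wrap indices with a mask rather than a modulo.
 * With the 4096-entry ring used on >= 82544 (mask 4095):
 *
 *	WM_NEXTTX(txq, 100)  == (100 + 1) & 4095  == 101
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 */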

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
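
/*
 * Worked example (illustrative): a 9018-byte jumbo frame split across
 * 2k (MCLBYTES) buffers occupies howmany(9018, 2048) = 5 descriptors,
 * so the 256-entry ring holds roughly 50 such packets, as noted above.
 */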

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
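
/*
 * Illustrative expansion: WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[...];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname) snprintf()s
 * "txq00txdw" into the name buffer and hands both to
 * evcnt_attach_dynamic() with EVCNT_TYPE_INTR.
 */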

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
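
/*
 * Illustrative call pattern (a sketch, not a function in this file):
 * chip-specific variants are installed in these operation vectors at
 * attach time, and callers check acquire()'s return value because of
 * __attribute__((warn_unused_result)):
 *
 *	if (sc->phy.acquire(sc) != 0)
 *		return;
 *	rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *	sc->phy.release(sc);
 */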

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
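
/*
 * Illustrative only: rxq_tailp always points at the location that will
 * receive the next mbuf pointer (rxq_head when the chain is empty,
 * otherwise the m_next field of the last mbuf), so WM_RXCHAIN_LINK()
 * appends in O(1) without walking the chain:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */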

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
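
/*
 * Illustrative only: CSR_WRITE_FLUSH() forces posted writes out to the
 * device by issuing a harmless read of the STATUS register, e.g.:
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);	 the write reaches the chip before we go on
 */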

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
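
/*
 * Illustrative only: on a platform with a 64-bit bus_addr_t, a
 * descriptor base of 0x123456000 splits into
 *
 *	WM_CDTXADDR_LO(txq, 0) == 0x23456000
 *	WM_CDTXADDR_HI(txq, 0) == 0x1
 *
 * while a 32-bit bus_addr_t always yields 0 for the high half.
 */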

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
   1304 	  WM_T_82571,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1307 	  "Intel PRO/1000 QT (82571EB)",
   1308 	  WM_T_82571,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1311 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1312 	  WM_T_82571,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1315 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1316 	  WM_T_82571,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1319 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1320 	  WM_T_82571,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1323 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1324 	  WM_T_82571,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1327 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1328 	  WM_T_82571,		WMP_F_FIBER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1331 	  "Intel i82572EI 1000baseT Ethernet",
   1332 	  WM_T_82572,		WMP_F_COPPER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1335 	  "Intel i82572EI 1000baseX Ethernet",
   1336 	  WM_T_82572,		WMP_F_FIBER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1339 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1340 	  WM_T_82572,		WMP_F_SERDES },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1343 	  "Intel i82572EI 1000baseT Ethernet",
   1344 	  WM_T_82572,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1347 	  "Intel i82573E",
   1348 	  WM_T_82573,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1351 	  "Intel i82573E IAMT",
   1352 	  WM_T_82573,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1355 	  "Intel i82573L Gigabit Ethernet",
   1356 	  WM_T_82573,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1359 	  "Intel i82574L",
   1360 	  WM_T_82574,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1363 	  "Intel i82574L",
   1364 	  WM_T_82574,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1367 	  "Intel i82583V",
   1368 	  WM_T_82583,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1371 	  "i80003 dual 1000baseT Ethernet",
   1372 	  WM_T_80003,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1375 	  "i80003 dual 1000baseX Ethernet",
   1376 	  WM_T_80003,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1379 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1380 	  WM_T_80003,		WMP_F_SERDES },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1383 	  "Intel i80003 1000baseT Ethernet",
   1384 	  WM_T_80003,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1387 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1388 	  WM_T_80003,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1391 	  "Intel i82801H (M_AMT) LAN Controller",
   1392 	  WM_T_ICH8,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1394 	  "Intel i82801H (AMT) LAN Controller",
   1395 	  WM_T_ICH8,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1397 	  "Intel i82801H LAN Controller",
   1398 	  WM_T_ICH8,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1400 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1401 	  WM_T_ICH8,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1403 	  "Intel i82801H (M) LAN Controller",
   1404 	  WM_T_ICH8,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1406 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1407 	  WM_T_ICH8,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1409 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1410 	  WM_T_ICH8,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1412 	  "82567V-3 LAN Controller",
   1413 	  WM_T_ICH8,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1415 	  "82801I (AMT) LAN Controller",
   1416 	  WM_T_ICH9,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1418 	  "82801I 10/100 LAN Controller",
   1419 	  WM_T_ICH9,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1421 	  "82801I (G) 10/100 LAN Controller",
   1422 	  WM_T_ICH9,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1424 	  "82801I (GT) 10/100 LAN Controller",
   1425 	  WM_T_ICH9,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1427 	  "82801I (C) LAN Controller",
   1428 	  WM_T_ICH9,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1430 	  "82801I mobile LAN Controller",
   1431 	  WM_T_ICH9,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1433 	  "82801I mobile (V) LAN Controller",
   1434 	  WM_T_ICH9,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1436 	  "82801I mobile (AMT) LAN Controller",
   1437 	  WM_T_ICH9,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1439 	  "82567LM-4 LAN Controller",
   1440 	  WM_T_ICH9,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1442 	  "82567LM-2 LAN Controller",
   1443 	  WM_T_ICH10,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1445 	  "82567LF-2 LAN Controller",
   1446 	  WM_T_ICH10,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1448 	  "82567LM-3 LAN Controller",
   1449 	  WM_T_ICH10,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1451 	  "82567LF-3 LAN Controller",
   1452 	  WM_T_ICH10,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1454 	  "82567V-2 LAN Controller",
   1455 	  WM_T_ICH10,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1457 	  "82567V-3? LAN Controller",
   1458 	  WM_T_ICH10,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1460 	  "HANKSVILLE LAN Controller",
   1461 	  WM_T_ICH10,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1463 	  "PCH LAN (82577LM) Controller",
   1464 	  WM_T_PCH,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1466 	  "PCH LAN (82577LC) Controller",
   1467 	  WM_T_PCH,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1469 	  "PCH LAN (82578DM) Controller",
   1470 	  WM_T_PCH,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1472 	  "PCH LAN (82578DC) Controller",
   1473 	  WM_T_PCH,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1475 	  "PCH2 LAN (82579LM) Controller",
   1476 	  WM_T_PCH2,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1478 	  "PCH2 LAN (82579V) Controller",
   1479 	  WM_T_PCH2,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1481 	  "82575EB dual-1000baseT Ethernet",
   1482 	  WM_T_82575,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1484 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1485 	  WM_T_82575,		WMP_F_SERDES },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1487 	  "82575GB quad-1000baseT Ethernet",
   1488 	  WM_T_82575,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1490 	  "82575GB quad-1000baseT Ethernet (PM)",
   1491 	  WM_T_82575,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1493 	  "82576 1000BaseT Ethernet",
   1494 	  WM_T_82576,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1496 	  "82576 1000BaseX Ethernet",
   1497 	  WM_T_82576,		WMP_F_FIBER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1500 	  "82576 gigabit Ethernet (SERDES)",
   1501 	  WM_T_82576,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1504 	  "82576 quad-1000BaseT Ethernet",
   1505 	  WM_T_82576,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1508 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1509 	  WM_T_82576,		WMP_F_COPPER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1512 	  "82576 gigabit Ethernet",
   1513 	  WM_T_82576,		WMP_F_COPPER },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1516 	  "82576 gigabit Ethernet (SERDES)",
   1517 	  WM_T_82576,		WMP_F_SERDES },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1519 	  "82576 quad-gigabit Ethernet (SERDES)",
   1520 	  WM_T_82576,		WMP_F_SERDES },
   1521 
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1523 	  "82580 1000BaseT Ethernet",
   1524 	  WM_T_82580,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1526 	  "82580 1000BaseX Ethernet",
   1527 	  WM_T_82580,		WMP_F_FIBER },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1530 	  "82580 1000BaseT Ethernet (SERDES)",
   1531 	  WM_T_82580,		WMP_F_SERDES },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1534 	  "82580 gigabit Ethernet (SGMII)",
   1535 	  WM_T_82580,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1537 	  "82580 dual-1000BaseT Ethernet",
   1538 	  WM_T_82580,		WMP_F_COPPER },
   1539 
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1541 	  "82580 quad-1000BaseX Ethernet",
   1542 	  WM_T_82580,		WMP_F_FIBER },
   1543 
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1545 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1546 	  WM_T_82580,		WMP_F_COPPER },
   1547 
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1549 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1550 	  WM_T_82580,		WMP_F_SERDES },
   1551 
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1553 	  "DH89XXCC 1000BASE-KX Ethernet",
   1554 	  WM_T_82580,		WMP_F_SERDES },
   1555 
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1557 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1558 	  WM_T_82580,		WMP_F_SERDES },
   1559 
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1561 	  "I350 Gigabit Network Connection",
   1562 	  WM_T_I350,		WMP_F_COPPER },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1565 	  "I350 Gigabit Fiber Network Connection",
   1566 	  WM_T_I350,		WMP_F_FIBER },
   1567 
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1569 	  "I350 Gigabit Backplane Connection",
   1570 	  WM_T_I350,		WMP_F_SERDES },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1573 	  "I350 Quad Port Gigabit Ethernet",
   1574 	  WM_T_I350,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1577 	  "I350 Gigabit Connection",
   1578 	  WM_T_I350,		WMP_F_COPPER },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1581 	  "I354 Gigabit Ethernet (KX)",
   1582 	  WM_T_I354,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1585 	  "I354 Gigabit Ethernet (SGMII)",
   1586 	  WM_T_I354,		WMP_F_COPPER },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1589 	  "I354 Gigabit Ethernet (2.5G)",
   1590 	  WM_T_I354,		WMP_F_COPPER },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1593 	  "I210-T1 Ethernet Server Adapter",
   1594 	  WM_T_I210,		WMP_F_COPPER },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1597 	  "I210 Ethernet (Copper OEM)",
   1598 	  WM_T_I210,		WMP_F_COPPER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1601 	  "I210 Ethernet (Copper IT)",
   1602 	  WM_T_I210,		WMP_F_COPPER },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1605 	  "I210 Ethernet (Copper, FLASH less)",
   1606 	  WM_T_I210,		WMP_F_COPPER },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1609 	  "I210 Gigabit Ethernet (Fiber)",
   1610 	  WM_T_I210,		WMP_F_FIBER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1613 	  "I210 Gigabit Ethernet (SERDES)",
   1614 	  WM_T_I210,		WMP_F_SERDES },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1617 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1618 	  WM_T_I210,		WMP_F_SERDES },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1621 	  "I210 Gigabit Ethernet (SGMII)",
   1622 	  WM_T_I210,		WMP_F_COPPER },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1625 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1626 	  WM_T_I210,		WMP_F_COPPER },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1629 	  "I211 Ethernet (COPPER)",
   1630 	  WM_T_I211,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1632 	  "I217 V Ethernet Connection",
   1633 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1635 	  "I217 LM Ethernet Connection",
   1636 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1638 	  "I218 V Ethernet Connection",
   1639 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1641 	  "I218 V Ethernet Connection",
   1642 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1644 	  "I218 V Ethernet Connection",
   1645 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1647 	  "I218 LM Ethernet Connection",
   1648 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1650 	  "I218 LM Ethernet Connection",
   1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1653 	  "I218 LM Ethernet Connection",
   1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1656 	  "I219 LM Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1659 	  "I219 LM (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1662 	  "I219 LM (3) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1665 	  "I219 LM (4) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1668 	  "I219 LM (5) Ethernet Connection",
   1669 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1671 	  "I219 LM (6) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1674 	  "I219 LM (7) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1677 	  "I219 LM (8) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1680 	  "I219 LM (9) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1683 	  "I219 LM (10) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1686 	  "I219 LM (11) Ethernet Connection",
   1687 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1689 	  "I219 LM (12) Ethernet Connection",
   1690 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1692 	  "I219 LM (13) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1695 	  "I219 LM (14) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1698 	  "I219 LM (15) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1701 	  "I219 LM (16) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1704 	  "I219 LM (17) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1707 	  "I219 LM (18) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1710 	  "I219 LM (19) Ethernet Connection",
   1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1713 	  "I219 V Ethernet Connection",
   1714 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1716 	  "I219 V (2) Ethernet Connection",
   1717 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1719 	  "I219 V (4) Ethernet Connection",
   1720 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1722 	  "I219 V (5) Ethernet Connection",
   1723 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1725 	  "I219 V (6) Ethernet Connection",
   1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1728 	  "I219 V (7) Ethernet Connection",
   1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1731 	  "I219 V (8) Ethernet Connection",
   1732 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1734 	  "I219 V (9) Ethernet Connection",
   1735 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1737 	  "I219 V (10) Ethernet Connection",
   1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1740 	  "I219 V (11) Ethernet Connection",
   1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1743 	  "I219 V (12) Ethernet Connection",
   1744 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1746 	  "I219 V (13) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1749 	  "I219 V (14) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1752 	  "I219 V (15) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1755 	  "I219 V (16) Ethernet Connection",
   1756 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1758 	  "I219 V (17) Ethernet Connection",
   1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1761 	  "I219 V (18) Ethernet Connection",
   1762 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1764 	  "I219 V (19) Ethernet Connection",
   1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1766 	{ 0,			0,
   1767 	  NULL,
   1768 	  0,			0 },
   1769 };
   1770 
   1771 /*
    1772  * Register read/write functions,
    1773  * other than CSR_{READ|WRITE}().
   1774  */
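         /*
          * A note on the two helpers below (drawn from the code itself, and
          * from the I/O BAR discussion later in attach): i8254x I/O-mapped
          * access is indirect -- a register offset is written at I/O offset
          * 0 to select the register, and the data is then read or written
          * at I/O offset 4.
          */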
   1775 
   1776 #if 0 /* Not currently used */
   1777 static inline uint32_t
   1778 wm_io_read(struct wm_softc *sc, int reg)
   1779 {
   1780 
   1781 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1782 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1783 }
   1784 #endif
   1785 
   1786 static inline void
   1787 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1788 {
   1789 
   1790 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1791 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1792 }
   1793 
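         /*
          * Write an 8-bit value to an indirect controller register on
          * 82575-class chips: the data occupies the low bits, the register
          * offset is placed in the address field, and the hardware is then
          * polled (up to SCTL_CTL_POLL_TIMEOUT iterations of 5us each) for
          * the READY bit to confirm completion.
          */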
   1794 static inline void
   1795 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1796     uint32_t data)
   1797 {
   1798 	uint32_t regval;
   1799 	int i;
   1800 
   1801 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1802 
   1803 	CSR_WRITE(sc, reg, regval);
   1804 
   1805 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1806 		delay(5);
   1807 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1808 			break;
   1809 	}
   1810 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1811 		aprint_error("%s: WARNING:"
   1812 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1813 		    device_xname(sc->sc_dev), reg);
   1814 	}
   1815 }
   1816 
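         /*
          * Store a 64-bit bus address into a descriptor's two little-endian
          * 32-bit halves.
          */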
   1817 static inline void
   1818 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1819 {
   1820 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1821 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1822 }
   1823 
   1824 /*
   1825  * Descriptor sync/init functions.
   1826  */
   1827 static inline void
   1828 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1829 {
   1830 	struct wm_softc *sc = txq->txq_sc;
   1831 
   1832 	/* If it will wrap around, sync to the end of the ring. */
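         	/*
         	 * For example (a sketch): with a 256-descriptor ring,
         	 * start == 250 and num == 10, the sync here covers
         	 * descriptors 250..255 and the one below covers 0..3.
         	 */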
   1833 	if ((start + num) > WM_NTXDESC(txq)) {
   1834 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1835 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1836 		    (WM_NTXDESC(txq) - start), ops);
   1837 		num -= (WM_NTXDESC(txq) - start);
   1838 		start = 0;
   1839 	}
   1840 
   1841 	/* Now sync whatever is left. */
   1842 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1843 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1844 }
   1845 
   1846 static inline void
   1847 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1848 {
   1849 	struct wm_softc *sc = rxq->rxq_sc;
   1850 
   1851 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1852 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1853 }
   1854 
   1855 static inline void
   1856 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1857 {
   1858 	struct wm_softc *sc = rxq->rxq_sc;
   1859 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1860 	struct mbuf *m = rxs->rxs_mbuf;
   1861 
   1862 	/*
   1863 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1864 	 * so that the payload after the Ethernet header is aligned
   1865 	 * to a 4-byte boundary.
    1866 	 *
   1867 	 * XXX BRAINDAMAGE ALERT!
   1868 	 * The stupid chip uses the same size for every buffer, which
   1869 	 * is set in the Receive Control register.  We are using the 2K
   1870 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1871 	 * reason, we can't "scoot" packets longer than the standard
   1872 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1873 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1874 	 * the upper layer copy the headers.
   1875 	 */
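         	/*
         	 * Concretely: with a 2-byte tweak, the 14-byte Ethernet
         	 * header ends at buffer offset 16, leaving the IP header
         	 * that follows it 4-byte aligned.
         	 */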
   1876 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1877 
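         	/*
         	 * Three descriptor layouts are initialized below: 82574
         	 * extended descriptors, the "new queue" layout used by
         	 * 82575 and later, and the original wiseman layout for
         	 * everything else.
         	 */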
   1878 	if (sc->sc_type == WM_T_82574) {
   1879 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1880 		rxd->erx_data.erxd_addr =
   1881 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1882 		rxd->erx_data.erxd_dd = 0;
   1883 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1884 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1885 
   1886 		rxd->nqrx_data.nrxd_paddr =
   1887 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1888 		/* Currently, split header is not supported. */
   1889 		rxd->nqrx_data.nrxd_haddr = 0;
   1890 	} else {
   1891 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1892 
   1893 		wm_set_dma_addr(&rxd->wrx_addr,
   1894 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1895 		rxd->wrx_len = 0;
   1896 		rxd->wrx_cksum = 0;
   1897 		rxd->wrx_status = 0;
   1898 		rxd->wrx_errors = 0;
   1899 		rxd->wrx_special = 0;
   1900 	}
   1901 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1902 
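         	/*
         	 * Tell the hardware how far the ring has been replenished
         	 * by updating the receive descriptor tail.
         	 */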
   1903 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1904 }
   1905 
   1906 /*
   1907  * Device driver interface functions and commonly used functions.
   1908  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1909  */
   1910 
   1911 /* Lookup supported device table */
   1912 static const struct wm_product *
   1913 wm_lookup(const struct pci_attach_args *pa)
   1914 {
   1915 	const struct wm_product *wmp;
   1916 
   1917 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1918 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1919 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1920 			return wmp;
   1921 	}
   1922 	return NULL;
   1923 }
   1924 
   1925 /* The match function (ca_match) */
   1926 static int
   1927 wm_match(device_t parent, cfdata_t cf, void *aux)
   1928 {
   1929 	struct pci_attach_args *pa = aux;
   1930 
   1931 	if (wm_lookup(pa) != NULL)
   1932 		return 1;
   1933 
   1934 	return 0;
   1935 }
   1936 
   1937 /* The attach function (ca_attach) */
   1938 static void
   1939 wm_attach(device_t parent, device_t self, void *aux)
   1940 {
   1941 	struct wm_softc *sc = device_private(self);
   1942 	struct pci_attach_args *pa = aux;
   1943 	prop_dictionary_t dict;
   1944 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1945 	pci_chipset_tag_t pc = pa->pa_pc;
   1946 	int counts[PCI_INTR_TYPE_SIZE];
   1947 	pci_intr_type_t max_type;
   1948 	const char *eetype, *xname;
   1949 	bus_space_tag_t memt;
   1950 	bus_space_handle_t memh;
   1951 	bus_size_t memsize;
   1952 	int memh_valid;
   1953 	int i, error;
   1954 	const struct wm_product *wmp;
   1955 	prop_data_t ea;
   1956 	prop_number_t pn;
   1957 	uint8_t enaddr[ETHER_ADDR_LEN];
   1958 	char buf[256];
   1959 	char wqname[MAXCOMLEN];
   1960 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1961 	pcireg_t preg, memtype;
   1962 	uint16_t eeprom_data, apme_mask;
   1963 	bool force_clear_smbi;
   1964 	uint32_t link_mode;
   1965 	uint32_t reg;
   1966 
   1967 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1968 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1969 #endif
   1970 	sc->sc_dev = self;
   1971 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   1972 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1973 	sc->sc_core_stopping = false;
   1974 
   1975 	wmp = wm_lookup(pa);
   1976 #ifdef DIAGNOSTIC
   1977 	if (wmp == NULL) {
   1978 		printf("\n");
   1979 		panic("wm_attach: impossible");
   1980 	}
   1981 #endif
   1982 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1983 
   1984 	sc->sc_pc = pa->pa_pc;
   1985 	sc->sc_pcitag = pa->pa_tag;
   1986 
   1987 	if (pci_dma64_available(pa)) {
   1988 		aprint_verbose(", 64-bit DMA");
   1989 		sc->sc_dmat = pa->pa_dmat64;
   1990 	} else {
   1991 		aprint_verbose(", 32-bit DMA");
   1992 		sc->sc_dmat = pa->pa_dmat;
   1993 	}
   1994 
   1995 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1996 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1997 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1998 
   1999 	sc->sc_type = wmp->wmp_type;
   2000 
   2001 	/* Set default function pointers */
   2002 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2003 	sc->phy.release = sc->nvm.release = wm_put_null;
   2004 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2005 
   2006 	if (sc->sc_type < WM_T_82543) {
   2007 		if (sc->sc_rev < 2) {
   2008 			aprint_error_dev(sc->sc_dev,
   2009 			    "i82542 must be at least rev. 2\n");
   2010 			return;
   2011 		}
   2012 		if (sc->sc_rev < 3)
   2013 			sc->sc_type = WM_T_82542_2_0;
   2014 	}
   2015 
   2016 	/*
   2017 	 * Disable MSI for Errata:
   2018 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2019 	 *
   2020 	 *  82544: Errata 25
   2021 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2022 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2023 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2024 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2025 	 *
   2026 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2027 	 *
   2028 	 *  82571 & 82572: Errata 63
   2029 	 */
   2030 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2031 	    || (sc->sc_type == WM_T_82572))
   2032 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2033 
   2034 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2035 	    || (sc->sc_type == WM_T_82580)
   2036 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2037 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2038 		sc->sc_flags |= WM_F_NEWQUEUE;
   2039 
   2040 	/* Set device properties (mactype) */
   2041 	dict = device_properties(sc->sc_dev);
   2042 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2043 
   2044 	/*
    2045 	 * Map the device.  All devices support memory-mapped access,
   2046 	 * and it is really required for normal operation.
   2047 	 */
   2048 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2049 	switch (memtype) {
   2050 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2051 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2052 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2053 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2054 		break;
   2055 	default:
   2056 		memh_valid = 0;
   2057 		break;
   2058 	}
   2059 
   2060 	if (memh_valid) {
   2061 		sc->sc_st = memt;
   2062 		sc->sc_sh = memh;
   2063 		sc->sc_ss = memsize;
   2064 	} else {
   2065 		aprint_error_dev(sc->sc_dev,
   2066 		    "unable to map device registers\n");
   2067 		return;
   2068 	}
   2069 
   2070 	/*
   2071 	 * In addition, i82544 and later support I/O mapped indirect
   2072 	 * register access.  It is not desirable (nor supported in
   2073 	 * this driver) to use it for normal operation, though it is
   2074 	 * required to work around bugs in some chip versions.
   2075 	 */
   2076 	switch (sc->sc_type) {
   2077 	case WM_T_82544:
   2078 	case WM_T_82541:
   2079 	case WM_T_82541_2:
   2080 	case WM_T_82547:
   2081 	case WM_T_82547_2:
   2082 		/* First we have to find the I/O BAR. */
   2083 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2084 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2085 			if (memtype == PCI_MAPREG_TYPE_IO)
   2086 				break;
   2087 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2088 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2089 				i += 4;	/* skip high bits, too */
   2090 		}
   2091 		if (i < PCI_MAPREG_END) {
   2092 			/*
    2093 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2094 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2095 			 * That's no problem, because newer chips don't have
    2096 			 * this bug.
   2097 			 *
   2098 			 * The i8254x doesn't apparently respond when the
   2099 			 * I/O BAR is 0, which looks somewhat like it's not
   2100 			 * been configured.
   2101 			 */
   2102 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2103 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2104 				aprint_error_dev(sc->sc_dev,
   2105 				    "WARNING: I/O BAR at zero.\n");
   2106 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2107 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2108 			    == 0) {
   2109 				sc->sc_flags |= WM_F_IOH_VALID;
   2110 			} else
   2111 				aprint_error_dev(sc->sc_dev,
   2112 				    "WARNING: unable to map I/O space\n");
   2113 		}
   2114 		break;
   2115 	default:
   2116 		break;
   2117 	}
   2118 
   2119 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2120 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2121 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2122 	if (sc->sc_type < WM_T_82542_2_1)
   2123 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2124 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2125 
   2126 	/* Power up chip */
   2127 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2128 	    && error != EOPNOTSUPP) {
   2129 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2130 		return;
   2131 	}
   2132 
   2133 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2134 	/*
    2135 	 * Don't use MSI-X if we can use only one queue, to save interrupt
    2136 	 * resources.
   2137 	 */
   2138 	if (sc->sc_nqueues > 1) {
   2139 		max_type = PCI_INTR_TYPE_MSIX;
   2140 		/*
    2141 		 * The 82583 has an MSI-X capability in its PCI configuration
    2142 		 * space but doesn't actually support it; at least the
    2143 		 * documentation says nothing about MSI-X.
   2144 		 */
   2145 		counts[PCI_INTR_TYPE_MSIX]
   2146 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2147 	} else {
   2148 		max_type = PCI_INTR_TYPE_MSI;
   2149 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2150 	}
   2151 
   2152 	/* Allocation settings */
   2153 	counts[PCI_INTR_TYPE_MSI] = 1;
   2154 	counts[PCI_INTR_TYPE_INTX] = 1;
   2155 	/* overridden by disable flags */
   2156 	if (wm_disable_msi != 0) {
   2157 		counts[PCI_INTR_TYPE_MSI] = 0;
   2158 		if (wm_disable_msix != 0) {
   2159 			max_type = PCI_INTR_TYPE_INTX;
   2160 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2161 		}
   2162 	} else if (wm_disable_msix != 0) {
   2163 		max_type = PCI_INTR_TYPE_MSI;
   2164 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2165 	}
   2166 
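         	/*
         	 * Interrupt types are tried in descending order from max_type:
         	 * MSI-X (when multiqueue), then MSI, then INTx, releasing the
         	 * previous allocation before each retry below.
         	 */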
   2167 alloc_retry:
   2168 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2169 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2170 		return;
   2171 	}
   2172 
   2173 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2174 		error = wm_setup_msix(sc);
   2175 		if (error) {
   2176 			pci_intr_release(pc, sc->sc_intrs,
   2177 			    counts[PCI_INTR_TYPE_MSIX]);
   2178 
   2179 			/* Setup for MSI: Disable MSI-X */
   2180 			max_type = PCI_INTR_TYPE_MSI;
   2181 			counts[PCI_INTR_TYPE_MSI] = 1;
   2182 			counts[PCI_INTR_TYPE_INTX] = 1;
   2183 			goto alloc_retry;
   2184 		}
   2185 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2186 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2187 		error = wm_setup_legacy(sc);
   2188 		if (error) {
   2189 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2190 			    counts[PCI_INTR_TYPE_MSI]);
   2191 
   2192 			/* The next try is for INTx: Disable MSI */
   2193 			max_type = PCI_INTR_TYPE_INTX;
   2194 			counts[PCI_INTR_TYPE_INTX] = 1;
   2195 			goto alloc_retry;
   2196 		}
   2197 	} else {
   2198 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2199 		error = wm_setup_legacy(sc);
   2200 		if (error) {
   2201 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2202 			    counts[PCI_INTR_TYPE_INTX]);
   2203 			return;
   2204 		}
   2205 	}
   2206 
   2207 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2208 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2209 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2210 	    WQ_PERCPU | WQ_MPSAFE);
   2211 	if (error) {
   2212 		aprint_error_dev(sc->sc_dev,
   2213 		    "unable to create TxRx workqueue\n");
   2214 		goto out;
   2215 	}
   2216 
   2217 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2218 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2219 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2220 	    WQ_MPSAFE);
   2221 	if (error) {
   2222 		workqueue_destroy(sc->sc_queue_wq);
   2223 		aprint_error_dev(sc->sc_dev,
   2224 		    "unable to create reset workqueue\n");
   2225 		goto out;
   2226 	}
   2227 
   2228 	/*
   2229 	 * Check the function ID (unit number of the chip).
   2230 	 */
   2231 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2232 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2233 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2234 	    || (sc->sc_type == WM_T_82580)
   2235 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2236 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2237 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2238 	else
   2239 		sc->sc_funcid = 0;
   2240 
   2241 	/*
   2242 	 * Determine a few things about the bus we're connected to.
   2243 	 */
   2244 	if (sc->sc_type < WM_T_82543) {
   2245 		/* We don't really know the bus characteristics here. */
   2246 		sc->sc_bus_speed = 33;
   2247 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2248 		/*
   2249 		 * CSA (Communication Streaming Architecture) is about as fast
    2250 		 * as a 32-bit 66MHz PCI bus.
   2251 		 */
   2252 		sc->sc_flags |= WM_F_CSA;
   2253 		sc->sc_bus_speed = 66;
   2254 		aprint_verbose_dev(sc->sc_dev,
   2255 		    "Communication Streaming Architecture\n");
   2256 		if (sc->sc_type == WM_T_82547) {
   2257 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2258 			callout_setfunc(&sc->sc_txfifo_ch,
   2259 			    wm_82547_txfifo_stall, sc);
   2260 			aprint_verbose_dev(sc->sc_dev,
   2261 			    "using 82547 Tx FIFO stall work-around\n");
   2262 		}
   2263 	} else if (sc->sc_type >= WM_T_82571) {
   2264 		sc->sc_flags |= WM_F_PCIE;
   2265 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2266 		    && (sc->sc_type != WM_T_ICH10)
   2267 		    && (sc->sc_type != WM_T_PCH)
   2268 		    && (sc->sc_type != WM_T_PCH2)
   2269 		    && (sc->sc_type != WM_T_PCH_LPT)
   2270 		    && (sc->sc_type != WM_T_PCH_SPT)
   2271 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2272 			/* ICH* and PCH* have no PCIe capability registers */
   2273 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2274 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2275 				NULL) == 0)
   2276 				aprint_error_dev(sc->sc_dev,
   2277 				    "unable to find PCIe capability\n");
   2278 		}
   2279 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2280 	} else {
   2281 		reg = CSR_READ(sc, WMREG_STATUS);
   2282 		if (reg & STATUS_BUS64)
   2283 			sc->sc_flags |= WM_F_BUS64;
   2284 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2285 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2286 
   2287 			sc->sc_flags |= WM_F_PCIX;
   2288 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2289 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2290 				aprint_error_dev(sc->sc_dev,
   2291 				    "unable to find PCIX capability\n");
   2292 			else if (sc->sc_type != WM_T_82545_3 &&
   2293 			    sc->sc_type != WM_T_82546_3) {
   2294 				/*
   2295 				 * Work around a problem caused by the BIOS
   2296 				 * setting the max memory read byte count
   2297 				 * incorrectly.
   2298 				 */
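         				/*
         				 * The field encodes 512 << n bytes; if
         				 * the command value exceeds the maximum
         				 * the device advertises in its status
         				 * register, clamp it down below.
         				 */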
   2299 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2300 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2301 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2302 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2303 
   2304 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2305 				    PCIX_CMD_BYTECNT_SHIFT;
   2306 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2307 				    PCIX_STATUS_MAXB_SHIFT;
   2308 				if (bytecnt > maxb) {
   2309 					aprint_verbose_dev(sc->sc_dev,
   2310 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2311 					    512 << bytecnt, 512 << maxb);
   2312 					pcix_cmd = (pcix_cmd &
   2313 					    ~PCIX_CMD_BYTECNT_MASK) |
   2314 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2315 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2316 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2317 					    pcix_cmd);
   2318 				}
   2319 			}
   2320 		}
   2321 		/*
   2322 		 * The quad port adapter is special; it has a PCIX-PCIX
   2323 		 * bridge on the board, and can run the secondary bus at
   2324 		 * a higher speed.
   2325 		 */
   2326 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2327 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2328 								      : 66;
   2329 		} else if (sc->sc_flags & WM_F_PCIX) {
   2330 			switch (reg & STATUS_PCIXSPD_MASK) {
   2331 			case STATUS_PCIXSPD_50_66:
   2332 				sc->sc_bus_speed = 66;
   2333 				break;
   2334 			case STATUS_PCIXSPD_66_100:
   2335 				sc->sc_bus_speed = 100;
   2336 				break;
   2337 			case STATUS_PCIXSPD_100_133:
   2338 				sc->sc_bus_speed = 133;
   2339 				break;
   2340 			default:
   2341 				aprint_error_dev(sc->sc_dev,
   2342 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2343 				    reg & STATUS_PCIXSPD_MASK);
   2344 				sc->sc_bus_speed = 66;
   2345 				break;
   2346 			}
   2347 		} else
   2348 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2349 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2350 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2351 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2352 	}
   2353 
    2354 	/* Clear interesting stat counters. */
   2355 	CSR_READ(sc, WMREG_COLC);
   2356 	CSR_READ(sc, WMREG_RXERRC);
   2357 
   2358 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2359 	    || (sc->sc_type >= WM_T_ICH8))
   2360 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2361 	if (sc->sc_type >= WM_T_ICH8)
   2362 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2363 
   2364 	/* Set PHY, NVM mutex related stuff */
   2365 	switch (sc->sc_type) {
   2366 	case WM_T_82542_2_0:
   2367 	case WM_T_82542_2_1:
   2368 	case WM_T_82543:
   2369 	case WM_T_82544:
   2370 		/* Microwire */
   2371 		sc->nvm.read = wm_nvm_read_uwire;
   2372 		sc->sc_nvm_wordsize = 64;
   2373 		sc->sc_nvm_addrbits = 6;
   2374 		break;
   2375 	case WM_T_82540:
   2376 	case WM_T_82545:
   2377 	case WM_T_82545_3:
   2378 	case WM_T_82546:
   2379 	case WM_T_82546_3:
   2380 		/* Microwire */
   2381 		sc->nvm.read = wm_nvm_read_uwire;
   2382 		reg = CSR_READ(sc, WMREG_EECD);
   2383 		if (reg & EECD_EE_SIZE) {
   2384 			sc->sc_nvm_wordsize = 256;
   2385 			sc->sc_nvm_addrbits = 8;
   2386 		} else {
   2387 			sc->sc_nvm_wordsize = 64;
   2388 			sc->sc_nvm_addrbits = 6;
   2389 		}
   2390 		sc->sc_flags |= WM_F_LOCK_EECD;
   2391 		sc->nvm.acquire = wm_get_eecd;
   2392 		sc->nvm.release = wm_put_eecd;
   2393 		break;
   2394 	case WM_T_82541:
   2395 	case WM_T_82541_2:
   2396 	case WM_T_82547:
   2397 	case WM_T_82547_2:
   2398 		reg = CSR_READ(sc, WMREG_EECD);
   2399 		/*
    2400 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM only on
    2401 		 * the 8254[17], so set the flags and functions before calling it.
   2402 		 */
   2403 		sc->sc_flags |= WM_F_LOCK_EECD;
   2404 		sc->nvm.acquire = wm_get_eecd;
   2405 		sc->nvm.release = wm_put_eecd;
   2406 		if (reg & EECD_EE_TYPE) {
   2407 			/* SPI */
   2408 			sc->nvm.read = wm_nvm_read_spi;
   2409 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2410 			wm_nvm_set_addrbits_size_eecd(sc);
   2411 		} else {
   2412 			/* Microwire */
   2413 			sc->nvm.read = wm_nvm_read_uwire;
   2414 			if ((reg & EECD_EE_ABITS) != 0) {
   2415 				sc->sc_nvm_wordsize = 256;
   2416 				sc->sc_nvm_addrbits = 8;
   2417 			} else {
   2418 				sc->sc_nvm_wordsize = 64;
   2419 				sc->sc_nvm_addrbits = 6;
   2420 			}
   2421 		}
   2422 		break;
   2423 	case WM_T_82571:
   2424 	case WM_T_82572:
   2425 		/* SPI */
   2426 		sc->nvm.read = wm_nvm_read_eerd;
    2427 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2428 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2429 		wm_nvm_set_addrbits_size_eecd(sc);
   2430 		sc->phy.acquire = wm_get_swsm_semaphore;
   2431 		sc->phy.release = wm_put_swsm_semaphore;
   2432 		sc->nvm.acquire = wm_get_nvm_82571;
   2433 		sc->nvm.release = wm_put_nvm_82571;
   2434 		break;
   2435 	case WM_T_82573:
   2436 	case WM_T_82574:
   2437 	case WM_T_82583:
   2438 		sc->nvm.read = wm_nvm_read_eerd;
    2439 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2440 		if (sc->sc_type == WM_T_82573) {
   2441 			sc->phy.acquire = wm_get_swsm_semaphore;
   2442 			sc->phy.release = wm_put_swsm_semaphore;
   2443 			sc->nvm.acquire = wm_get_nvm_82571;
   2444 			sc->nvm.release = wm_put_nvm_82571;
   2445 		} else {
   2446 			/* Both PHY and NVM use the same semaphore. */
   2447 			sc->phy.acquire = sc->nvm.acquire
   2448 			    = wm_get_swfwhw_semaphore;
   2449 			sc->phy.release = sc->nvm.release
   2450 			    = wm_put_swfwhw_semaphore;
   2451 		}
   2452 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2453 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2454 			sc->sc_nvm_wordsize = 2048;
   2455 		} else {
   2456 			/* SPI */
   2457 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2458 			wm_nvm_set_addrbits_size_eecd(sc);
   2459 		}
   2460 		break;
   2461 	case WM_T_82575:
   2462 	case WM_T_82576:
   2463 	case WM_T_82580:
   2464 	case WM_T_I350:
   2465 	case WM_T_I354:
   2466 	case WM_T_80003:
   2467 		/* SPI */
   2468 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2469 		wm_nvm_set_addrbits_size_eecd(sc);
   2470 		if ((sc->sc_type == WM_T_80003)
   2471 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2472 			sc->nvm.read = wm_nvm_read_eerd;
   2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2474 		} else {
   2475 			sc->nvm.read = wm_nvm_read_spi;
   2476 			sc->sc_flags |= WM_F_LOCK_EECD;
   2477 		}
   2478 		sc->phy.acquire = wm_get_phy_82575;
   2479 		sc->phy.release = wm_put_phy_82575;
   2480 		sc->nvm.acquire = wm_get_nvm_80003;
   2481 		sc->nvm.release = wm_put_nvm_80003;
   2482 		break;
   2483 	case WM_T_ICH8:
   2484 	case WM_T_ICH9:
   2485 	case WM_T_ICH10:
   2486 	case WM_T_PCH:
   2487 	case WM_T_PCH2:
   2488 	case WM_T_PCH_LPT:
   2489 		sc->nvm.read = wm_nvm_read_ich8;
   2490 		/* FLASH */
   2491 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2492 		sc->sc_nvm_wordsize = 2048;
   2493 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2494 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2495 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2496 			aprint_error_dev(sc->sc_dev,
   2497 			    "can't map FLASH registers\n");
   2498 			goto out;
   2499 		}
   2500 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
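         		/*
         		 * GFPREG gives the flash region's bounds in
         		 * ICH_FLASH_SECTOR_SIZE units: the base sector in the
         		 * low bits and the limit in bits 16 and up.  The math
         		 * below converts that to a per-bank size in 16-bit
         		 * words, assuming two equal banks (a reading of this
         		 * code, not of the datasheet).
         		 */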
   2501 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2502 		    ICH_FLASH_SECTOR_SIZE;
   2503 		sc->sc_ich8_flash_bank_size =
   2504 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2505 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2506 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2507 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2508 		sc->sc_flashreg_offset = 0;
   2509 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2510 		sc->phy.release = wm_put_swflag_ich8lan;
   2511 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2512 		sc->nvm.release = wm_put_nvm_ich8lan;
   2513 		break;
   2514 	case WM_T_PCH_SPT:
   2515 	case WM_T_PCH_CNP:
   2516 		sc->nvm.read = wm_nvm_read_spt;
   2517 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2518 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2519 		sc->sc_flasht = sc->sc_st;
   2520 		sc->sc_flashh = sc->sc_sh;
   2521 		sc->sc_ich8_flash_base = 0;
   2522 		sc->sc_nvm_wordsize =
   2523 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2524 		    * NVM_SIZE_MULTIPLIER;
    2525 		/* It is the size in bytes; we want words. */
   2526 		sc->sc_nvm_wordsize /= 2;
   2527 		/* Assume 2 banks */
   2528 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2529 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2530 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2531 		sc->phy.release = wm_put_swflag_ich8lan;
   2532 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2533 		sc->nvm.release = wm_put_nvm_ich8lan;
   2534 		break;
   2535 	case WM_T_I210:
   2536 	case WM_T_I211:
    2537 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2538 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2539 		if (wm_nvm_flash_presence_i210(sc)) {
   2540 			sc->nvm.read = wm_nvm_read_eerd;
   2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2542 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2543 			wm_nvm_set_addrbits_size_eecd(sc);
   2544 		} else {
   2545 			sc->nvm.read = wm_nvm_read_invm;
   2546 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2547 			sc->sc_nvm_wordsize = INVM_SIZE;
   2548 		}
   2549 		sc->phy.acquire = wm_get_phy_82575;
   2550 		sc->phy.release = wm_put_phy_82575;
   2551 		sc->nvm.acquire = wm_get_nvm_80003;
   2552 		sc->nvm.release = wm_put_nvm_80003;
   2553 		break;
   2554 	default:
   2555 		break;
   2556 	}
   2557 
   2558 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2559 	switch (sc->sc_type) {
   2560 	case WM_T_82571:
   2561 	case WM_T_82572:
   2562 		reg = CSR_READ(sc, WMREG_SWSM2);
   2563 		if ((reg & SWSM2_LOCK) == 0) {
   2564 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2565 			force_clear_smbi = true;
   2566 		} else
   2567 			force_clear_smbi = false;
   2568 		break;
   2569 	case WM_T_82573:
   2570 	case WM_T_82574:
   2571 	case WM_T_82583:
   2572 		force_clear_smbi = true;
   2573 		break;
   2574 	default:
   2575 		force_clear_smbi = false;
   2576 		break;
   2577 	}
   2578 	if (force_clear_smbi) {
   2579 		reg = CSR_READ(sc, WMREG_SWSM);
   2580 		if ((reg & SWSM_SMBI) != 0)
   2581 			aprint_error_dev(sc->sc_dev,
   2582 			    "Please update the Bootagent\n");
   2583 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2584 	}
   2585 
   2586 	/*
    2587 	 * Defer printing the EEPROM type until after verifying the checksum.
   2588 	 * This allows the EEPROM type to be printed correctly in the case
   2589 	 * that no EEPROM is attached.
   2590 	 */
   2591 	/*
   2592 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2593 	 * this for later, so we can fail future reads from the EEPROM.
   2594 	 */
   2595 	if (wm_nvm_validate_checksum(sc)) {
   2596 		/*
    2597 		 * Validate a second time, because some PCIe parts fail the
    2598 		 * first check due to the link being in a sleep state.
   2599 		 */
   2600 		if (wm_nvm_validate_checksum(sc))
   2601 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2602 	}
   2603 
   2604 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2605 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2606 	else {
   2607 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2608 		    sc->sc_nvm_wordsize);
   2609 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2610 			aprint_verbose("iNVM");
   2611 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2612 			aprint_verbose("FLASH(HW)");
   2613 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2614 			aprint_verbose("FLASH");
   2615 		else {
   2616 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2617 				eetype = "SPI";
   2618 			else
   2619 				eetype = "MicroWire";
   2620 			aprint_verbose("(%d address bits) %s EEPROM",
   2621 			    sc->sc_nvm_addrbits, eetype);
   2622 		}
   2623 	}
   2624 	wm_nvm_version(sc);
   2625 	aprint_verbose("\n");
   2626 
   2627 	/*
   2628 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2629 	 * incorrect.
   2630 	 */
   2631 	wm_gmii_setup_phytype(sc, 0, 0);
   2632 
   2633 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2634 	switch (sc->sc_type) {
   2635 	case WM_T_ICH8:
   2636 	case WM_T_ICH9:
   2637 	case WM_T_ICH10:
   2638 	case WM_T_PCH:
   2639 	case WM_T_PCH2:
   2640 	case WM_T_PCH_LPT:
   2641 	case WM_T_PCH_SPT:
   2642 	case WM_T_PCH_CNP:
   2643 		apme_mask = WUC_APME;
   2644 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2645 		if ((eeprom_data & apme_mask) != 0)
   2646 			sc->sc_flags |= WM_F_WOL;
   2647 		break;
   2648 	default:
   2649 		break;
   2650 	}
   2651 
   2652 	/* Reset the chip to a known state. */
   2653 	wm_reset(sc);
   2654 
   2655 	/*
   2656 	 * Check for I21[01] PLL workaround.
   2657 	 *
   2658 	 * Three cases:
   2659 	 * a) Chip is I211.
   2660 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2661 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2662 	 */
   2663 	if (sc->sc_type == WM_T_I211)
   2664 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2665 	if (sc->sc_type == WM_T_I210) {
   2666 		if (!wm_nvm_flash_presence_i210(sc))
   2667 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2668 		else if ((sc->sc_nvm_ver_major < 3)
   2669 		    || ((sc->sc_nvm_ver_major == 3)
   2670 			&& (sc->sc_nvm_ver_minor < 25))) {
   2671 			aprint_verbose_dev(sc->sc_dev,
   2672 			    "ROM image version %d.%d is older than 3.25\n",
   2673 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2674 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2675 		}
   2676 	}
   2677 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2678 		wm_pll_workaround_i210(sc);
   2679 
   2680 	wm_get_wakeup(sc);
   2681 
   2682 	/* Non-AMT based hardware can now take control from firmware */
   2683 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2684 		wm_get_hw_control(sc);
   2685 
   2686 	/*
    2687 	 * Read the Ethernet address from the EEPROM, unless it was
    2688 	 * already provided via device properties.
   2689 	 */
   2690 	ea = prop_dictionary_get(dict, "mac-address");
   2691 	if (ea != NULL) {
   2692 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2693 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2694 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2695 	} else {
   2696 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2697 			aprint_error_dev(sc->sc_dev,
   2698 			    "unable to read Ethernet address\n");
   2699 			goto out;
   2700 		}
   2701 	}
   2702 
   2703 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2704 	    ether_sprintf(enaddr));
   2705 
   2706 	/*
   2707 	 * Read the config info from the EEPROM, and set up various
   2708 	 * bits in the control registers based on their contents.
   2709 	 */
   2710 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2711 	if (pn != NULL) {
   2712 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2713 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2714 	} else {
   2715 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2716 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2717 			goto out;
   2718 		}
   2719 	}
   2720 
   2721 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2722 	if (pn != NULL) {
   2723 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2724 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2725 	} else {
   2726 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2727 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2728 			goto out;
   2729 		}
   2730 	}
   2731 
   2732 	/* check for WM_F_WOL */
   2733 	switch (sc->sc_type) {
   2734 	case WM_T_82542_2_0:
   2735 	case WM_T_82542_2_1:
   2736 	case WM_T_82543:
   2737 		/* dummy? */
   2738 		eeprom_data = 0;
   2739 		apme_mask = NVM_CFG3_APME;
   2740 		break;
   2741 	case WM_T_82544:
   2742 		apme_mask = NVM_CFG2_82544_APM_EN;
   2743 		eeprom_data = cfg2;
   2744 		break;
   2745 	case WM_T_82546:
   2746 	case WM_T_82546_3:
   2747 	case WM_T_82571:
   2748 	case WM_T_82572:
   2749 	case WM_T_82573:
   2750 	case WM_T_82574:
   2751 	case WM_T_82583:
   2752 	case WM_T_80003:
   2753 	case WM_T_82575:
   2754 	case WM_T_82576:
   2755 		apme_mask = NVM_CFG3_APME;
   2756 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2757 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2758 		break;
   2759 	case WM_T_82580:
   2760 	case WM_T_I350:
   2761 	case WM_T_I354:
   2762 	case WM_T_I210:
   2763 	case WM_T_I211:
   2764 		apme_mask = NVM_CFG3_APME;
   2765 		wm_nvm_read(sc,
   2766 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2767 		    1, &eeprom_data);
   2768 		break;
   2769 	case WM_T_ICH8:
   2770 	case WM_T_ICH9:
   2771 	case WM_T_ICH10:
   2772 	case WM_T_PCH:
   2773 	case WM_T_PCH2:
   2774 	case WM_T_PCH_LPT:
   2775 	case WM_T_PCH_SPT:
   2776 	case WM_T_PCH_CNP:
    2777 		/* Already checked before wm_reset() */
   2778 		apme_mask = eeprom_data = 0;
   2779 		break;
   2780 	default: /* XXX 82540 */
   2781 		apme_mask = NVM_CFG3_APME;
   2782 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2783 		break;
   2784 	}
   2785 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2786 	if ((eeprom_data & apme_mask) != 0)
   2787 		sc->sc_flags |= WM_F_WOL;
   2788 
   2789 	/*
    2790 	 * We have the EEPROM settings; now apply the special cases
    2791 	 * where the EEPROM may be wrong or the board doesn't support
    2792 	 * wake-on-LAN on a particular port.
   2793 	 */
   2794 	switch (sc->sc_pcidevid) {
   2795 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2796 		sc->sc_flags &= ~WM_F_WOL;
   2797 		break;
   2798 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2799 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2800 		/* Wake events are only supported on port A of dual-fiber
    2801 		 * adapters, regardless of the EEPROM setting. */
   2802 		if (sc->sc_funcid == 1)
   2803 			sc->sc_flags &= ~WM_F_WOL;
   2804 		break;
   2805 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2806 		/* If quad port adapter, disable WoL on all but port A */
   2807 		if (sc->sc_funcid != 0)
   2808 			sc->sc_flags &= ~WM_F_WOL;
   2809 		break;
   2810 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2811 		/* Wake events are only supported on port A of dual-fiber
    2812 		 * adapters, regardless of the EEPROM setting. */
   2813 		if (sc->sc_funcid == 1)
   2814 			sc->sc_flags &= ~WM_F_WOL;
   2815 		break;
   2816 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2817 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2818 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2819 		/* If quad port adapter, disable WoL on all but port A */
   2820 		if (sc->sc_funcid != 0)
   2821 			sc->sc_flags &= ~WM_F_WOL;
   2822 		break;
   2823 	}
   2824 
   2825 	if (sc->sc_type >= WM_T_82575) {
   2826 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2827 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2828 			    nvmword);
   2829 			if ((sc->sc_type == WM_T_82575) ||
   2830 			    (sc->sc_type == WM_T_82576)) {
   2831 				/* Check NVM for autonegotiation */
   2832 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2833 				    != 0)
   2834 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2835 			}
   2836 			if ((sc->sc_type == WM_T_82575) ||
   2837 			    (sc->sc_type == WM_T_I350)) {
   2838 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2839 					sc->sc_flags |= WM_F_MAS;
   2840 			}
   2841 		}
   2842 	}
   2843 
   2844 	/*
    2845 	 * XXX need special handling for some multiple-port cards
    2846 	 * to disable a particular port.
   2847 	 */
   2848 
   2849 	if (sc->sc_type >= WM_T_82544) {
   2850 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2851 		if (pn != NULL) {
   2852 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2853 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2854 		} else {
   2855 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2856 				aprint_error_dev(sc->sc_dev,
   2857 				    "unable to read SWDPIN\n");
   2858 				goto out;
   2859 			}
   2860 		}
   2861 	}
   2862 
   2863 	if (cfg1 & NVM_CFG1_ILOS)
   2864 		sc->sc_ctrl |= CTRL_ILOS;
   2865 
   2866 	/*
   2867 	 * XXX
    2868 	 * This code isn't correct because pins 2 and 3 are located at
    2869 	 * different positions on newer chips. Check all datasheets.
    2870 	 *
    2871 	 * Until this is resolved, apply it only to the 82580 and older.
   2872 	 */
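         	/*
         	 * The NVM SWDPIN word packs two 4-bit fields, SWDPIO (pin
         	 * direction) and SWDPIN (pin value); each nibble is shifted
         	 * into the matching field of the CTRL register below.
         	 */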
   2873 	if (sc->sc_type <= WM_T_82580) {
   2874 		if (sc->sc_type >= WM_T_82544) {
   2875 			sc->sc_ctrl |=
   2876 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2877 			    CTRL_SWDPIO_SHIFT;
   2878 			sc->sc_ctrl |=
   2879 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2880 			    CTRL_SWDPINS_SHIFT;
   2881 		} else {
   2882 			sc->sc_ctrl |=
   2883 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2884 			    CTRL_SWDPIO_SHIFT;
   2885 		}
   2886 	}
   2887 
   2888 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2889 		wm_nvm_read(sc,
   2890 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2891 		    1, &nvmword);
   2892 		if (nvmword & NVM_CFG3_ILOS)
   2893 			sc->sc_ctrl |= CTRL_ILOS;
   2894 	}
   2895 
   2896 #if 0
   2897 	if (sc->sc_type >= WM_T_82544) {
   2898 		if (cfg1 & NVM_CFG1_IPS0)
   2899 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2900 		if (cfg1 & NVM_CFG1_IPS1)
   2901 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2902 		sc->sc_ctrl_ext |=
   2903 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2904 		    CTRL_EXT_SWDPIO_SHIFT;
   2905 		sc->sc_ctrl_ext |=
   2906 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2907 		    CTRL_EXT_SWDPINS_SHIFT;
   2908 	} else {
   2909 		sc->sc_ctrl_ext |=
   2910 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2911 		    CTRL_EXT_SWDPIO_SHIFT;
   2912 	}
   2913 #endif
   2914 
   2915 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2916 #if 0
   2917 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2918 #endif
   2919 
   2920 	if (sc->sc_type == WM_T_PCH) {
   2921 		uint16_t val;
   2922 
   2923 		/* Save the NVM K1 bit setting */
   2924 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2925 
   2926 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2927 			sc->sc_nvm_k1_enabled = 1;
   2928 		else
   2929 			sc->sc_nvm_k1_enabled = 0;
   2930 	}
   2931 
    2932 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2933 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2934 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2935 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2936 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2937 	    || sc->sc_type == WM_T_82573
   2938 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2939 		/* Copper only */
   2940 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2941 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2942 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2943 	    || (sc->sc_type == WM_T_I211)) {
   2944 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2945 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2946 		switch (link_mode) {
   2947 		case CTRL_EXT_LINK_MODE_1000KX:
   2948 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2949 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2950 			break;
   2951 		case CTRL_EXT_LINK_MODE_SGMII:
   2952 			if (wm_sgmii_uses_mdio(sc)) {
   2953 				aprint_normal_dev(sc->sc_dev,
   2954 				    "SGMII(MDIO)\n");
   2955 				sc->sc_flags |= WM_F_SGMII;
   2956 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2957 				break;
   2958 			}
   2959 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2960 			/*FALLTHROUGH*/
   2961 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2962 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2963 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2964 				if (link_mode
   2965 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2966 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2967 					sc->sc_flags |= WM_F_SGMII;
   2968 					aprint_verbose_dev(sc->sc_dev,
   2969 					    "SGMII\n");
   2970 				} else {
   2971 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2972 					aprint_verbose_dev(sc->sc_dev,
   2973 					    "SERDES\n");
   2974 				}
   2975 				break;
   2976 			}
   2977 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2978 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2979 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2980 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2981 				sc->sc_flags |= WM_F_SGMII;
   2982 			}
   2983 			/* Do not change link mode for 100BaseFX */
   2984 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2985 				break;
   2986 
   2987 			/* Change current link mode setting */
   2988 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2989 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2990 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2991 			else
   2992 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2993 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2994 			break;
   2995 		case CTRL_EXT_LINK_MODE_GMII:
   2996 		default:
   2997 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2998 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2999 			break;
   3000 		}
   3001 
    3002 		/* Enable the I2C interface only when SGMII is in use. */
    3003 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3004 			reg |= CTRL_EXT_I2C_ENA;
    3005 		else
    3006 			reg &= ~CTRL_EXT_I2C_ENA;
   3007 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3008 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3009 			if (!wm_sgmii_uses_mdio(sc))
   3010 				wm_gmii_setup_phytype(sc, 0, 0);
   3011 			wm_reset_mdicnfg_82580(sc);
   3012 		}
   3013 	} else if (sc->sc_type < WM_T_82543 ||
   3014 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3015 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3016 			aprint_error_dev(sc->sc_dev,
   3017 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3018 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3019 		}
   3020 	} else {
   3021 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3022 			aprint_error_dev(sc->sc_dev,
   3023 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3024 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3025 		}
   3026 	}
   3027 
   3028 	if (sc->sc_type >= WM_T_PCH2)
   3029 		sc->sc_flags |= WM_F_EEE;
   3030 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3031 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3032 		/* XXX: Need special handling for I354. (not yet) */
   3033 		if (sc->sc_type != WM_T_I354)
   3034 			sc->sc_flags |= WM_F_EEE;
   3035 	}
   3036 
   3037 	/*
    3038 	 * The I350 has a bug where it always strips the CRC whether
    3039 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   3040 	 */
   3041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3042 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3043 		sc->sc_flags |= WM_F_CRC_STRIP;
   3044 
   3045 	/* Set device properties (macflags) */
   3046 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3047 
   3048 	if (sc->sc_flags != 0) {
   3049 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3050 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3051 	}
   3052 
   3053 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3054 
   3055 	/* Initialize the media structures accordingly. */
   3056 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3057 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3058 	else
   3059 		wm_tbi_mediainit(sc); /* All others */
   3060 
   3061 	ifp = &sc->sc_ethercom.ec_if;
   3062 	xname = device_xname(sc->sc_dev);
   3063 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3064 	ifp->if_softc = sc;
   3065 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3066 	ifp->if_extflags = IFEF_MPSAFE;
   3067 	ifp->if_ioctl = wm_ioctl;
   3068 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3069 		ifp->if_start = wm_nq_start;
   3070 		/*
    3071 		 * When the number of CPUs is one and the controller can use
    3072 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3073 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    3074 		 * other for link status changes.
    3075 		 * In this situation, wm_nq_transmit() is disadvantageous
    3076 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   3077 		 */
   3078 		if (wm_is_using_multiqueue(sc))
   3079 			ifp->if_transmit = wm_nq_transmit;
   3080 	} else {
   3081 		ifp->if_start = wm_start;
   3082 		/*
   3083 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3084 		 * described above.
   3085 		 */
   3086 		if (wm_is_using_multiqueue(sc))
   3087 			ifp->if_transmit = wm_transmit;
   3088 	}
    3089 	/* wm(4) does not use ifp->if_watchdog; wm_tick() acts as the watchdog. */
   3090 	ifp->if_init = wm_init;
   3091 	ifp->if_stop = wm_stop;
   3092 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3093 	IFQ_SET_READY(&ifp->if_snd);
   3094 
   3095 	/* Check for jumbo frame */
   3096 	switch (sc->sc_type) {
   3097 	case WM_T_82573:
   3098 		/* XXX limited to 9234 if ASPM is disabled */
   3099 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3100 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3101 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3102 		break;
   3103 	case WM_T_82571:
   3104 	case WM_T_82572:
   3105 	case WM_T_82574:
   3106 	case WM_T_82583:
   3107 	case WM_T_82575:
   3108 	case WM_T_82576:
   3109 	case WM_T_82580:
   3110 	case WM_T_I350:
   3111 	case WM_T_I354:
   3112 	case WM_T_I210:
   3113 	case WM_T_I211:
   3114 	case WM_T_80003:
   3115 	case WM_T_ICH9:
   3116 	case WM_T_ICH10:
   3117 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3118 	case WM_T_PCH_LPT:
   3119 	case WM_T_PCH_SPT:
   3120 	case WM_T_PCH_CNP:
   3121 		/* XXX limited to 9234 */
   3122 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3123 		break;
   3124 	case WM_T_PCH:
   3125 		/* XXX limited to 4096 */
   3126 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3127 		break;
   3128 	case WM_T_82542_2_0:
   3129 	case WM_T_82542_2_1:
   3130 	case WM_T_ICH8:
   3131 		/* No support for jumbo frame */
   3132 		break;
   3133 	default:
   3134 		/* ETHER_MAX_LEN_JUMBO */
   3135 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3136 		break;
   3137 	}
   3138 
   3139 	/* If we're a i82543 or greater, we can support VLANs. */
   3140 	if (sc->sc_type >= WM_T_82543) {
   3141 		sc->sc_ethercom.ec_capabilities |=
   3142 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3143 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3144 	}
   3145 
   3146 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3147 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3148 
   3149 	/*
    3150 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    3151 	 * on the i82543 and later.
   3152 	 */
   3153 	if (sc->sc_type >= WM_T_82543) {
   3154 		ifp->if_capabilities |=
   3155 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3156 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3157 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3158 		    IFCAP_CSUM_TCPv6_Tx |
   3159 		    IFCAP_CSUM_UDPv6_Tx;
   3160 	}
   3161 
   3162 	/*
    3163 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3164 	 *
   3165 	 *	82541GI (8086:1076) ... no
   3166 	 *	82572EI (8086:10b9) ... yes
   3167 	 */
   3168 	if (sc->sc_type >= WM_T_82571) {
   3169 		ifp->if_capabilities |=
   3170 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3171 	}
   3172 
   3173 	/*
   3174 	 * If we're a i82544 or greater (except i82547), we can do
   3175 	 * TCP segmentation offload.
   3176 	 */
   3177 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3178 		ifp->if_capabilities |= IFCAP_TSOv4;
   3179 
   3180 	if (sc->sc_type >= WM_T_82571)
   3181 		ifp->if_capabilities |= IFCAP_TSOv6;
   3182 
   3183 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3184 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3185 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3186 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3187 
   3188 	/* Attach the interface. */
   3189 	if_initialize(ifp);
   3190 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3191 	ether_ifattach(ifp, enaddr);
   3192 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3193 	if_register(ifp);
   3194 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3195 	    RND_FLAG_DEFAULT);
   3196 
   3197 #ifdef WM_EVENT_COUNTERS
   3198 	/* Attach event counters. */
   3199 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3200 	    NULL, xname, "linkintr");
   3201 
   3202 	if (sc->sc_type >= WM_T_82542_2_1) {
   3203 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3204 		    NULL, xname, "tx_xoff");
   3205 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3206 		    NULL, xname, "tx_xon");
   3207 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3208 		    NULL, xname, "rx_xoff");
   3209 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3210 		    NULL, xname, "rx_xon");
   3211 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3212 		    NULL, xname, "rx_macctl");
   3213 	}
   3214 
   3215 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3216 	    NULL, xname, "CRC Error");
   3217 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3218 	    NULL, xname, "Symbol Error");
   3219 
   3220 	if (sc->sc_type >= WM_T_82543) {
   3221 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3222 		    NULL, xname, "Alignment Error");
   3223 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3224 		    NULL, xname, "Receive Error");
   3225 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3226 		    NULL, xname, "Carrier Extension Error");
   3227 	}
   3228 
   3229 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3230 	    NULL, xname, "Missed Packets");
   3231 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3232 	    NULL, xname, "Collision");
   3233 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3234 	    NULL, xname, "Sequence Error");
   3235 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3236 	    NULL, xname, "Receive Length Error");
   3237 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3238 	    NULL, xname, "Single Collision");
   3239 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3240 	    NULL, xname, "Excessive Collisions");
   3241 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3242 	    NULL, xname, "Multiple Collision");
   3243 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3244 	    NULL, xname, "Late Collisions");
   3245 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3246 	    NULL, xname, "Defer");
   3247 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3248 	    NULL, xname, "Good Packets Rx");
   3249 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3250 	    NULL, xname, "Broadcast Packets Rx");
   3251 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3252 	    NULL, xname, "Multicast Packets Rx");
   3253 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3254 	    NULL, xname, "Good Packets Tx");
   3255 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3256 	    NULL, xname, "Good Octets Rx");
   3257 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3258 	    NULL, xname, "Good Octets Tx");
   3259 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3260 	    NULL, xname, "Rx No Buffers");
   3261 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3262 	    NULL, xname, "Rx Undersize");
   3263 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3264 	    NULL, xname, "Rx Fragment");
   3265 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3266 	    NULL, xname, "Rx Oversize");
   3267 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3268 	    NULL, xname, "Rx Jabber");
   3269 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3270 	    NULL, xname, "Total Octets Rx");
   3271 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3272 	    NULL, xname, "Total Octets Tx");
   3273 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3274 	    NULL, xname, "Total Packets Rx");
   3275 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3276 	    NULL, xname, "Total Packets Tx");
   3277 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3278 	    NULL, xname, "Multicast Packets Tx");
   3279 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3280 	    NULL, xname, "Broadcast Packets Tx Count");
   3281 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3282 	    NULL, xname, "Packets Rx (64 bytes)");
   3283 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3284 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3285 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3286 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3287 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3288 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3289 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3290 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3291 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3292 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3293 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3294 	    NULL, xname, "Packets Tx (64 bytes)");
   3295 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3296 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3297 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3298 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3299 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3300 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3301 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3302 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3303 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3304 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3305 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3306 	    NULL, xname, "Interrupt Assertion");
   3307 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3308 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3309 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3310 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3311 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3312 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3313 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3314 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3315 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3316 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3317 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3318 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3319 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3320 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3321 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3322 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3323 	if (sc->sc_type >= WM_T_82543) {
   3324 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3325 		    NULL, xname, "Tx with No CRS");
   3326 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3327 		    NULL, xname, "TCP Segmentation Context Tx");
   3328 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3329 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3330 	}
   3331 	if (sc->sc_type >= WM_T_82540) {
   3332 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3333 		    NULL, xname, "Management Packets RX");
   3334 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3335 		    NULL, xname, "Management Packets Dropped");
   3336 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3337 		    NULL, xname, "Management Packets TX");
   3338 	}
   3339 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3340 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3341 		    NULL, xname, "BMC2OS Packets received by host");
   3342 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3343 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3344 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3345 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3346 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3347 		    NULL, xname, "OS2BMC Packets received by BMC");
   3348 	}
   3349 #endif /* WM_EVENT_COUNTERS */
   3350 
   3351 	sc->sc_txrx_use_workqueue = false;
   3352 
   3353 	if (wm_phy_need_linkdown_discard(sc)) {
   3354 		DPRINTF(sc, WM_DEBUG_LINK,
   3355 		    ("%s: %s: Set linkdown discard flag\n",
   3356 			device_xname(sc->sc_dev), __func__));
   3357 		wm_set_linkdown_discard(sc);
   3358 	}
   3359 
   3360 	wm_init_sysctls(sc);
   3361 
   3362 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3363 		pmf_class_network_register(self, ifp);
   3364 	else
   3365 		aprint_error_dev(self, "couldn't establish power handler\n");
   3366 
   3367 	sc->sc_flags |= WM_F_ATTACHED;
   3368 out:
   3369 	return;
   3370 }
   3371 
   3372 /* The detach function (ca_detach) */
   3373 static int
   3374 wm_detach(device_t self, int flags __unused)
   3375 {
   3376 	struct wm_softc *sc = device_private(self);
   3377 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3378 	int i;
   3379 
   3380 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3381 		return 0;
   3382 
   3383 	/* Stop the interface. Callouts are stopped in it. */
   3384 	IFNET_LOCK(ifp);
   3385 	sc->sc_dying = true;
   3386 	wm_stop(ifp, 1);
   3387 	IFNET_UNLOCK(ifp);
   3388 
   3389 	pmf_device_deregister(self);
   3390 
   3391 	sysctl_teardown(&sc->sc_sysctllog);
   3392 
   3393 #ifdef WM_EVENT_COUNTERS
   3394 	evcnt_detach(&sc->sc_ev_linkintr);
   3395 
   3396 	if (sc->sc_type >= WM_T_82542_2_1) {
   3397 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3398 		evcnt_detach(&sc->sc_ev_tx_xon);
   3399 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3400 		evcnt_detach(&sc->sc_ev_rx_xon);
   3401 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3402 	}
   3403 
   3404 	evcnt_detach(&sc->sc_ev_crcerrs);
   3405 	evcnt_detach(&sc->sc_ev_symerrc);
   3406 
   3407 	if (sc->sc_type >= WM_T_82543) {
   3408 		evcnt_detach(&sc->sc_ev_algnerrc);
   3409 		evcnt_detach(&sc->sc_ev_rxerrc);
   3410 		evcnt_detach(&sc->sc_ev_cexterr);
   3411 	}
   3412 	evcnt_detach(&sc->sc_ev_mpc);
   3413 	evcnt_detach(&sc->sc_ev_colc);
   3414 	evcnt_detach(&sc->sc_ev_sec);
   3415 	evcnt_detach(&sc->sc_ev_rlec);
   3416 	evcnt_detach(&sc->sc_ev_scc);
   3417 	evcnt_detach(&sc->sc_ev_ecol);
   3418 	evcnt_detach(&sc->sc_ev_mcc);
   3419 	evcnt_detach(&sc->sc_ev_latecol);
   3420 	evcnt_detach(&sc->sc_ev_dc);
   3421 	evcnt_detach(&sc->sc_ev_gprc);
   3422 	evcnt_detach(&sc->sc_ev_bprc);
   3423 	evcnt_detach(&sc->sc_ev_mprc);
   3424 	evcnt_detach(&sc->sc_ev_gptc);
   3425 	evcnt_detach(&sc->sc_ev_gorc);
   3426 	evcnt_detach(&sc->sc_ev_gotc);
   3427 	evcnt_detach(&sc->sc_ev_rnbc);
   3428 	evcnt_detach(&sc->sc_ev_ruc);
   3429 	evcnt_detach(&sc->sc_ev_rfc);
   3430 	evcnt_detach(&sc->sc_ev_roc);
   3431 	evcnt_detach(&sc->sc_ev_rjc);
   3432 	evcnt_detach(&sc->sc_ev_tor);
   3433 	evcnt_detach(&sc->sc_ev_tot);
   3434 	evcnt_detach(&sc->sc_ev_tpr);
   3435 	evcnt_detach(&sc->sc_ev_tpt);
   3436 	evcnt_detach(&sc->sc_ev_mptc);
   3437 	evcnt_detach(&sc->sc_ev_bptc);
   3438 	evcnt_detach(&sc->sc_ev_prc64);
   3439 	evcnt_detach(&sc->sc_ev_prc127);
   3440 	evcnt_detach(&sc->sc_ev_prc255);
   3441 	evcnt_detach(&sc->sc_ev_prc511);
   3442 	evcnt_detach(&sc->sc_ev_prc1023);
   3443 	evcnt_detach(&sc->sc_ev_prc1522);
   3444 	evcnt_detach(&sc->sc_ev_ptc64);
   3445 	evcnt_detach(&sc->sc_ev_ptc127);
   3446 	evcnt_detach(&sc->sc_ev_ptc255);
   3447 	evcnt_detach(&sc->sc_ev_ptc511);
   3448 	evcnt_detach(&sc->sc_ev_ptc1023);
   3449 	evcnt_detach(&sc->sc_ev_ptc1522);
   3450 	evcnt_detach(&sc->sc_ev_iac);
   3451 	evcnt_detach(&sc->sc_ev_icrxptc);
   3452 	evcnt_detach(&sc->sc_ev_icrxatc);
   3453 	evcnt_detach(&sc->sc_ev_ictxptc);
   3454 	evcnt_detach(&sc->sc_ev_ictxact);
   3455 	evcnt_detach(&sc->sc_ev_ictxqec);
   3456 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3457 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3458 	evcnt_detach(&sc->sc_ev_icrxoc);
   3459 	if (sc->sc_type >= WM_T_82543) {
   3460 		evcnt_detach(&sc->sc_ev_tncrs);
   3461 		evcnt_detach(&sc->sc_ev_tsctc);
   3462 		evcnt_detach(&sc->sc_ev_tsctfc);
   3463 	}
   3464 	if (sc->sc_type >= WM_T_82540) {
   3465 		evcnt_detach(&sc->sc_ev_mgtprc);
   3466 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3467 		evcnt_detach(&sc->sc_ev_mgtptc);
   3468 	}
   3469 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3470 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3471 		evcnt_detach(&sc->sc_ev_o2bspc);
   3472 		evcnt_detach(&sc->sc_ev_b2ospc);
   3473 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3474 	}
   3475 #endif /* WM_EVENT_COUNTERS */
   3476 
   3477 	rnd_detach_source(&sc->rnd_source);
   3478 
   3479 	/* Tell the firmware about the release */
   3480 	mutex_enter(sc->sc_core_lock);
   3481 	wm_release_manageability(sc);
   3482 	wm_release_hw_control(sc);
   3483 	wm_enable_wakeup(sc);
   3484 	mutex_exit(sc->sc_core_lock);
   3485 
   3486 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3487 
   3488 	ether_ifdetach(ifp);
   3489 	if_detach(ifp);
   3490 	if_percpuq_destroy(sc->sc_ipq);
   3491 
   3492 	/* Delete all remaining media. */
   3493 	ifmedia_fini(&sc->sc_mii.mii_media);
   3494 
   3495 	/* Unload RX dmamaps and free mbufs */
   3496 	for (i = 0; i < sc->sc_nqueues; i++) {
   3497 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3498 		mutex_enter(rxq->rxq_lock);
   3499 		wm_rxdrain(rxq);
   3500 		mutex_exit(rxq->rxq_lock);
   3501 	}
   3502 	/* Must unlock here */
   3503 
   3504 	/* Disestablish the interrupt handler */
   3505 	for (i = 0; i < sc->sc_nintrs; i++) {
   3506 		if (sc->sc_ihs[i] != NULL) {
   3507 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3508 			sc->sc_ihs[i] = NULL;
   3509 		}
   3510 	}
   3511 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3512 
   3513 	/* wm_stop() ensured that the workqueues are stopped. */
   3514 	workqueue_destroy(sc->sc_queue_wq);
   3515 	workqueue_destroy(sc->sc_reset_wq);
   3516 
   3517 	for (i = 0; i < sc->sc_nqueues; i++)
   3518 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3519 
   3520 	wm_free_txrx_queues(sc);
   3521 
   3522 	/* Unmap the registers */
   3523 	if (sc->sc_ss) {
   3524 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3525 		sc->sc_ss = 0;
   3526 	}
   3527 	if (sc->sc_ios) {
   3528 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3529 		sc->sc_ios = 0;
   3530 	}
   3531 	if (sc->sc_flashs) {
   3532 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3533 		sc->sc_flashs = 0;
   3534 	}
   3535 
   3536 	if (sc->sc_core_lock)
   3537 		mutex_obj_free(sc->sc_core_lock);
   3538 	if (sc->sc_ich_phymtx)
   3539 		mutex_obj_free(sc->sc_ich_phymtx);
   3540 	if (sc->sc_ich_nvmmtx)
   3541 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3542 
   3543 	return 0;
   3544 }
   3545 
   3546 static bool
   3547 wm_suspend(device_t self, const pmf_qual_t *qual)
   3548 {
   3549 	struct wm_softc *sc = device_private(self);
   3550 
   3551 	wm_release_manageability(sc);
   3552 	wm_release_hw_control(sc);
   3553 	wm_enable_wakeup(sc);
   3554 
   3555 	return true;
   3556 }
   3557 
   3558 static bool
   3559 wm_resume(device_t self, const pmf_qual_t *qual)
   3560 {
   3561 	struct wm_softc *sc = device_private(self);
   3562 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3563 	pcireg_t reg;
   3564 	char buf[256];
   3565 
   3566 	reg = CSR_READ(sc, WMREG_WUS);
   3567 	if (reg != 0) {
   3568 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3569 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3570 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3571 	}
   3572 
   3573 	if (sc->sc_type >= WM_T_PCH2)
   3574 		wm_resume_workarounds_pchlan(sc);
   3575 	IFNET_LOCK(ifp);
   3576 	if ((ifp->if_flags & IFF_UP) == 0) {
   3577 		/* >= PCH_SPT hardware workaround before reset. */
   3578 		if (sc->sc_type >= WM_T_PCH_SPT)
   3579 			wm_flush_desc_rings(sc);
   3580 
   3581 		wm_reset(sc);
   3582 		/* Non-AMT based hardware can now take control from firmware */
   3583 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3584 			wm_get_hw_control(sc);
   3585 		wm_init_manageability(sc);
   3586 	} else {
   3587 		/*
   3588 		 * We called pmf_class_network_register(), so if_init() is
   3589 		 * automatically called when IFF_UP. wm_reset(),
   3590 		 * wm_get_hw_control() and wm_init_manageability() are called
   3591 		 * via wm_init().
   3592 		 */
   3593 	}
   3594 	IFNET_UNLOCK(ifp);
   3595 
   3596 	return true;
   3597 }
   3598 
   3599 /*
   3600  * wm_watchdog:
   3601  *
   3602  *	Watchdog checker.
   3603  */
   3604 static bool
   3605 wm_watchdog(struct ifnet *ifp)
   3606 {
   3607 	int qid;
   3608 	struct wm_softc *sc = ifp->if_softc;
    3609 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576), so 16 bits suffice. */
   3610 
   3611 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3612 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3613 
   3614 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3615 	}
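         	/*
         	 * hang_queue now has one bit set for each Tx queue that looks
         	 * hung (wm_watchdog_txq_locked() sets __BIT(wmq_id)).
         	 */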
   3616 
   3617 #ifdef WM_DEBUG
   3618 	if (sc->sc_trigger_reset) {
   3619 		/* debug operation, no need for atomicity or reliability */
   3620 		sc->sc_trigger_reset = 0;
   3621 		hang_queue++;
   3622 	}
   3623 #endif
   3624 
   3625 	if (hang_queue == 0)
   3626 		return true;
   3627 
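         	/*
         	 * atomic_swap_uint() ensures only one reset work is queued at a
         	 * time; the flag is cleared again in wm_handle_reset_work().
         	 */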
   3628 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3629 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3630 
   3631 	return false;
   3632 }
   3633 
   3634 /*
   3635  * Perform an interface watchdog reset.
   3636  */
   3637 static void
   3638 wm_handle_reset_work(struct work *work, void *arg)
   3639 {
   3640 	struct wm_softc * const sc = arg;
   3641 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3642 
   3643 	/* Don't want ioctl operations to happen */
   3644 	IFNET_LOCK(ifp);
   3645 
    3646 	/* Reset the interface. */
   3647 	wm_init(ifp);
   3648 
   3649 	IFNET_UNLOCK(ifp);
   3650 
   3651 	/*
    3652 	 * There is still some upper-layer processing that calls
    3653 	 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3654 	 */
   3655 	/* Try to get more packets going. */
   3656 	ifp->if_start(ifp);
   3657 
   3658 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3659 }
   3660 
   3661 
   3662 static void
   3663 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3664 {
   3665 
   3666 	mutex_enter(txq->txq_lock);
   3667 	if (txq->txq_sending &&
   3668 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3669 		wm_watchdog_txq_locked(ifp, txq, hang);
   3670 
   3671 	mutex_exit(txq->txq_lock);
   3672 }
   3673 
   3674 static void
   3675 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3676     uint16_t *hang)
   3677 {
   3678 	struct wm_softc *sc = ifp->if_softc;
   3679 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3680 
   3681 	KASSERT(mutex_owned(txq->txq_lock));
   3682 
   3683 	/*
   3684 	 * Since we're using delayed interrupts, sweep up
   3685 	 * before we report an error.
   3686 	 */
   3687 	wm_txeof(txq, UINT_MAX);
   3688 
   3689 	if (txq->txq_sending)
   3690 		*hang |= __BIT(wmq->wmq_id);
   3691 
   3692 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3693 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3694 		    device_xname(sc->sc_dev));
   3695 	} else {
   3696 #ifdef WM_DEBUG
   3697 		int i, j;
   3698 		struct wm_txsoft *txs;
   3699 #endif
   3700 		log(LOG_ERR,
   3701 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3702 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3703 		    txq->txq_next);
   3704 		if_statinc(ifp, if_oerrors);
   3705 #ifdef WM_DEBUG
   3706 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3707 		     i = WM_NEXTTXS(txq, i)) {
   3708 			txs = &txq->txq_soft[i];
   3709 			printf("txs %d tx %d -> %d\n",
   3710 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3711 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3712 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3713 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3714 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3715 					printf("\t %#08x%08x\n",
   3716 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3717 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3718 				} else {
   3719 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3720 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3721 					    txq->txq_descs[j].wtx_addr.wa_low);
   3722 					printf("\t %#04x%02x%02x%08x\n",
   3723 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3724 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3725 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3726 					    txq->txq_descs[j].wtx_cmdlen);
   3727 				}
   3728 				if (j == txs->txs_lastdesc)
   3729 					break;
   3730 			}
   3731 		}
   3732 #endif
   3733 	}
   3734 }
   3735 
   3736 /*
   3737  * wm_tick:
   3738  *
   3739  *	One second timer, used to check link status, sweep up
   3740  *	completed transmit jobs, etc.
   3741  */
   3742 static void
   3743 wm_tick(void *arg)
   3744 {
   3745 	struct wm_softc *sc = arg;
   3746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3747 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc,  sec, rlec, rxerrc,
   3748 	    cexterr;
   3749 
   3750 	mutex_enter(sc->sc_core_lock);
   3751 
   3752 	if (sc->sc_core_stopping) {
   3753 		mutex_exit(sc->sc_core_lock);
   3754 		return;
   3755 	}
   3756 
   3757 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3758 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3759 	mpc = CSR_READ(sc, WMREG_MPC);
   3760 	colc = CSR_READ(sc, WMREG_COLC);
   3761 	sec = CSR_READ(sc, WMREG_SEC);
   3762 	rlec = CSR_READ(sc, WMREG_RLEC);
   3763 
   3764 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3765 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3766 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3767 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3768 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3769 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3770 
   3771 	if (sc->sc_type >= WM_T_82542_2_1) {
   3772 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3773 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3774 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3775 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3776 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3777 	}
   3778 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3779 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3780 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3781 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3782 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3783 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3784 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3785 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3786 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3787 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3788 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3789 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3790 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3791 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3792 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3793 
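         	/*
         	 * GORC/GOTC are 64-bit octet counters split across a low and a
         	 * high 32-bit register.
         	 */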
   3794 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3795 	    CSR_READ(sc, WMREG_GORCL) +
   3796 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   3797 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3798 	    CSR_READ(sc, WMREG_GOTCL) +
   3799 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3800 
   3801 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3802 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3803 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3804 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3805 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3806 
   3807 	if (sc->sc_type >= WM_T_82540) {
   3808 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3809 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3810 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3811 	}
   3812 
   3813 	/*
    3814 	 * The TOR(L) register count includes:
    3815 	 *  - errored packets
    3816 	 *  - flow control packets
    3817 	 *  - "broadcast rejected" packets (this note appears in the 82574
    3818 	 *    and newer datasheets; what "broadcast rejected" means is unclear)
   3819 	 */
   3820 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3821 	    CSR_READ(sc, WMREG_TORL) +
   3822 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   3823 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3824 	    CSR_READ(sc, WMREG_TOTL) +
   3825 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3826 
   3827 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3828 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3829 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3830 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3831 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3832 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3833 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3834 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3835 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3836 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3837 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3838 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3839 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3840 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3841 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3842 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3843 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3844 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3845 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3846 
   3847 	if (sc->sc_type >= WM_T_82543) {
   3848 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3849 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3850 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3851 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3852 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3853 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3854 
   3855 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3856 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3857 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3858 	} else
   3859 		algnerrc = rxerrc = cexterr = 0;
   3860 
   3861 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3862 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3863 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3864 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3865 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3866 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3867 	}
   3868 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3869 	if_statadd_ref(nsr, if_collisions, colc);
   3870 	if_statadd_ref(nsr, if_ierrors,
   3871 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3872 	/*
    3873 	 * WMREG_RNBC is incremented when there are no available buffers in
    3874 	 * host memory. It is not the number of dropped packets, because the
    3875 	 * controller can still receive packets in that case as long as there
    3876 	 * is space in the PHY's FIFO.
    3877 	 *
    3878 	 * If you want to track WMREG_RNBC itself, use a dedicated EVCNT
    3879 	 * instead of if_iqdrops.
   3880 	 */
   3881 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3882 	IF_STAT_PUTREF(ifp);
   3883 
   3884 	if (sc->sc_flags & WM_F_HAS_MII)
   3885 		mii_tick(&sc->sc_mii);
   3886 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3887 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3888 		wm_serdes_tick(sc);
   3889 	else
   3890 		wm_tbi_tick(sc);
   3891 
   3892 	mutex_exit(sc->sc_core_lock);
   3893 
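         	/*
         	 * Reschedule the tick only when no queue was hung; on a hang,
         	 * wm_watchdog() queues reset work instead (and the reset path
         	 * presumably restarts the callout via wm_init()).
         	 */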
   3894 	if (wm_watchdog(ifp))
   3895 		callout_schedule(&sc->sc_tick_ch, hz);
   3896 }
   3897 
   3898 static int
   3899 wm_ifflags_cb(struct ethercom *ec)
   3900 {
   3901 	struct ifnet *ifp = &ec->ec_if;
   3902 	struct wm_softc *sc = ifp->if_softc;
   3903 	u_short iffchange;
   3904 	int ecchange;
   3905 	bool needreset = false;
   3906 	int rc = 0;
   3907 
   3908 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3909 		device_xname(sc->sc_dev), __func__));
   3910 
   3911 	KASSERT(IFNET_LOCKED(ifp));
   3912 
   3913 	mutex_enter(sc->sc_core_lock);
   3914 
   3915 	/*
   3916 	 * Check for if_flags.
   3917 	 * Main usage is to prevent linkdown when opening bpf.
   3918 	 */
   3919 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3920 	sc->sc_if_flags = ifp->if_flags;
   3921 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3922 		needreset = true;
   3923 		goto ec;
   3924 	}
   3925 
   3926 	/* iff related updates */
   3927 	if ((iffchange & IFF_PROMISC) != 0)
   3928 		wm_set_filter(sc);
   3929 
   3930 	wm_set_vlan(sc);
   3931 
   3932 ec:
   3933 	/* Check for ec_capenable. */
   3934 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3935 	sc->sc_ec_capenable = ec->ec_capenable;
   3936 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3937 		needreset = true;
   3938 		goto out;
   3939 	}
   3940 
   3941 	/* ec related updates */
   3942 	wm_set_eee(sc);
   3943 
   3944 out:
   3945 	if (needreset)
   3946 		rc = ENETRESET;
   3947 	mutex_exit(sc->sc_core_lock);
   3948 
   3949 	return rc;
   3950 }
   3951 
   3952 static bool
   3953 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3954 {
   3955 
   3956 	switch (sc->sc_phytype) {
   3957 	case WMPHY_82577: /* ihphy */
   3958 	case WMPHY_82578: /* atphy */
   3959 	case WMPHY_82579: /* ihphy */
   3960 	case WMPHY_I217: /* ihphy */
   3961 	case WMPHY_82580: /* ihphy */
   3962 	case WMPHY_I350: /* ihphy */
   3963 		return true;
   3964 	default:
   3965 		return false;
   3966 	}
   3967 }
   3968 
   3969 static void
   3970 wm_set_linkdown_discard(struct wm_softc *sc)
   3971 {
   3972 
   3973 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3974 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3975 
   3976 		mutex_enter(txq->txq_lock);
   3977 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3978 		mutex_exit(txq->txq_lock);
   3979 	}
   3980 }
   3981 
   3982 static void
   3983 wm_clear_linkdown_discard(struct wm_softc *sc)
   3984 {
   3985 
   3986 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3987 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3988 
   3989 		mutex_enter(txq->txq_lock);
   3990 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3991 		mutex_exit(txq->txq_lock);
   3992 	}
   3993 }
   3994 
   3995 /*
   3996  * wm_ioctl:		[ifnet interface function]
   3997  *
   3998  *	Handle control requests from the operator.
   3999  */
   4000 static int
   4001 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4002 {
   4003 	struct wm_softc *sc = ifp->if_softc;
   4004 	struct ifreq *ifr = (struct ifreq *)data;
   4005 	struct ifaddr *ifa = (struct ifaddr *)data;
   4006 	struct sockaddr_dl *sdl;
   4007 	int error;
   4008 
   4009 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4010 		device_xname(sc->sc_dev), __func__));
   4011 
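         	/*
         	 * SIOCADDMULTI and SIOCDELMULTI may arrive without the ifnet
         	 * lock held; every other request is expected to hold it.
         	 */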
   4012 	switch (cmd) {
   4013 	case SIOCADDMULTI:
   4014 	case SIOCDELMULTI:
   4015 		break;
   4016 	default:
   4017 		KASSERT(IFNET_LOCKED(ifp));
   4018 	}
   4019 
   4020 	switch (cmd) {
   4021 	case SIOCSIFMEDIA:
   4022 		mutex_enter(sc->sc_core_lock);
   4023 		/* Flow control requires full-duplex mode. */
   4024 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4025 		    (ifr->ifr_media & IFM_FDX) == 0)
   4026 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4027 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4028 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4029 				/* We can do both TXPAUSE and RXPAUSE. */
   4030 				ifr->ifr_media |=
   4031 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4032 			}
   4033 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4034 		}
   4035 		mutex_exit(sc->sc_core_lock);
   4036 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4037 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4038 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4039 				DPRINTF(sc, WM_DEBUG_LINK,
   4040 				    ("%s: %s: Set linkdown discard flag\n",
   4041 					device_xname(sc->sc_dev), __func__));
   4042 				wm_set_linkdown_discard(sc);
   4043 			}
   4044 		}
   4045 		break;
   4046 	case SIOCINITIFADDR:
   4047 		mutex_enter(sc->sc_core_lock);
   4048 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4049 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4050 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4051 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4052 			/* Unicast address is the first multicast entry */
   4053 			wm_set_filter(sc);
   4054 			error = 0;
   4055 			mutex_exit(sc->sc_core_lock);
   4056 			break;
   4057 		}
   4058 		mutex_exit(sc->sc_core_lock);
   4059 		/*FALLTHROUGH*/
   4060 	default:
   4061 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4062 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4063 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4064 				DPRINTF(sc, WM_DEBUG_LINK,
   4065 				    ("%s: %s: Set linkdown discard flag\n",
   4066 					device_xname(sc->sc_dev), __func__));
   4067 				wm_set_linkdown_discard(sc);
   4068 			}
   4069 		}
   4070 		const int s = splnet();
   4071 		/* It may call wm_start, so unlock here */
   4072 		error = ether_ioctl(ifp, cmd, data);
   4073 		splx(s);
   4074 		if (error != ENETRESET)
   4075 			break;
   4076 
   4077 		error = 0;
   4078 
   4079 		if (cmd == SIOCSIFCAP)
   4080 			error = if_init(ifp);
   4081 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4082 			mutex_enter(sc->sc_core_lock);
   4083 			if (sc->sc_if_flags & IFF_RUNNING) {
   4084 				/*
   4085 				 * Multicast list has changed; set the
   4086 				 * hardware filter accordingly.
   4087 				 */
   4088 				wm_set_filter(sc);
   4089 			}
   4090 			mutex_exit(sc->sc_core_lock);
   4091 		}
   4092 		break;
   4093 	}
   4094 
   4095 	return error;
   4096 }
   4097 
   4098 /* MAC address related */
   4099 
   4100 /*
    4101  * Get the offset of the MAC address and return it.
    4102  * If an error occurs, offset 0 is used.
   4103  */
   4104 static uint16_t
   4105 wm_check_alt_mac_addr(struct wm_softc *sc)
   4106 {
   4107 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4108 	uint16_t offset = NVM_OFF_MACADDR;
   4109 
   4110 	/* Try to read alternative MAC address pointer */
   4111 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4112 		return 0;
   4113 
    4114 	/* Check whether the pointer is valid. */
   4115 	if ((offset == 0x0000) || (offset == 0xffff))
   4116 		return 0;
   4117 
   4118 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4119 	/*
    4120 	 * Check whether the alternative MAC address is valid.
    4121 	 * Some cards have a non-0xffff pointer but don't actually use
    4122 	 * an alternative MAC address.
    4123 	 *
    4124 	 * A valid unicast address has the multicast bit (bit 0) clear.
   4125 	 */
   4126 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4127 		if (((myea[0] & 0xff) & 0x01) == 0)
   4128 			return offset; /* Found */
   4129 
   4130 	/* Not found */
   4131 	return 0;
   4132 }
   4133 
   4134 static int
   4135 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4136 {
   4137 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4138 	uint16_t offset = NVM_OFF_MACADDR;
   4139 	int do_invert = 0;
   4140 
   4141 	switch (sc->sc_type) {
   4142 	case WM_T_82580:
   4143 	case WM_T_I350:
   4144 	case WM_T_I354:
   4145 		/* EEPROM Top Level Partitioning */
   4146 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4147 		break;
   4148 	case WM_T_82571:
   4149 	case WM_T_82575:
   4150 	case WM_T_82576:
   4151 	case WM_T_80003:
   4152 	case WM_T_I210:
   4153 	case WM_T_I211:
   4154 		offset = wm_check_alt_mac_addr(sc);
   4155 		if (offset == 0)
   4156 			if ((sc->sc_funcid & 0x01) == 1)
   4157 				do_invert = 1;
   4158 		break;
   4159 	default:
   4160 		if ((sc->sc_funcid & 0x01) == 1)
   4161 			do_invert = 1;
   4162 		break;
   4163 	}
   4164 
   4165 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4166 		goto bad;
   4167 
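         	/*
         	 * Illustrative example: the NVM stores the address as three
         	 * little-endian words, so words {0x1100, 0x3322, 0x5544}
         	 * decode to 00:11:22:33:44:55.
         	 */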
   4168 	enaddr[0] = myea[0] & 0xff;
   4169 	enaddr[1] = myea[0] >> 8;
   4170 	enaddr[2] = myea[1] & 0xff;
   4171 	enaddr[3] = myea[1] >> 8;
   4172 	enaddr[4] = myea[2] & 0xff;
   4173 	enaddr[5] = myea[2] >> 8;
   4174 
   4175 	/*
   4176 	 * Toggle the LSB of the MAC address on the second port
   4177 	 * of some dual port cards.
   4178 	 */
   4179 	if (do_invert != 0)
   4180 		enaddr[5] ^= 1;
   4181 
   4182 	return 0;
   4183 
   4184 bad:
   4185 	return -1;
   4186 }
   4187 
   4188 /*
   4189  * wm_set_ral:
   4190  *
    4191  *	Set an entry in the receive address list.
   4192  */
   4193 static void
   4194 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4195 {
   4196 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4197 	uint32_t wlock_mac;
   4198 	int rv;
   4199 
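         	/*
         	 * ral_lo holds the first four octets in little-endian order
         	 * and ral_hi the last two plus the Address Valid bit; e.g.
         	 * 00:11:22:33:44:55 packs to ral_lo 0x33221100 and ral_hi
         	 * 0x5544 | RAL_AV.
         	 */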
   4200 	if (enaddr != NULL) {
   4201 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4202 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4203 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4204 		ral_hi |= RAL_AV;
   4205 	} else {
   4206 		ral_lo = 0;
   4207 		ral_hi = 0;
   4208 	}
   4209 
   4210 	switch (sc->sc_type) {
   4211 	case WM_T_82542_2_0:
   4212 	case WM_T_82542_2_1:
   4213 	case WM_T_82543:
   4214 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4215 		CSR_WRITE_FLUSH(sc);
   4216 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4217 		CSR_WRITE_FLUSH(sc);
   4218 		break;
   4219 	case WM_T_PCH2:
   4220 	case WM_T_PCH_LPT:
   4221 	case WM_T_PCH_SPT:
   4222 	case WM_T_PCH_CNP:
   4223 		if (idx == 0) {
   4224 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4225 			CSR_WRITE_FLUSH(sc);
   4226 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4227 			CSR_WRITE_FLUSH(sc);
   4228 			return;
   4229 		}
   4230 		if (sc->sc_type != WM_T_PCH2) {
   4231 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4232 			    FWSM_WLOCK_MAC);
   4233 			addrl = WMREG_SHRAL(idx - 1);
   4234 			addrh = WMREG_SHRAH(idx - 1);
   4235 		} else {
   4236 			wlock_mac = 0;
   4237 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4238 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4239 		}
   4240 
   4241 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4242 			rv = wm_get_swflag_ich8lan(sc);
   4243 			if (rv != 0)
   4244 				return;
   4245 			CSR_WRITE(sc, addrl, ral_lo);
   4246 			CSR_WRITE_FLUSH(sc);
   4247 			CSR_WRITE(sc, addrh, ral_hi);
   4248 			CSR_WRITE_FLUSH(sc);
   4249 			wm_put_swflag_ich8lan(sc);
   4250 		}
   4251 
   4252 		break;
   4253 	default:
   4254 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4255 		CSR_WRITE_FLUSH(sc);
   4256 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4257 		CSR_WRITE_FLUSH(sc);
   4258 		break;
   4259 	}
   4260 }
   4261 
   4262 /*
   4263  * wm_mchash:
   4264  *
   4265  *	Compute the hash of the multicast address for the 4096-bit
   4266  *	multicast filter.
   4267  */
   4268 static uint32_t
   4269 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4270 {
   4271 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4272 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4273 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4274 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4275 	uint32_t hash;
   4276 
   4277 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4278 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4279 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4280 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4281 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4282 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4283 		return (hash & 0x3ff);
   4284 	}
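         	/*
         	 * Worked example (illustrative only): in the non-ICH case
         	 * with sc_mchash_type == 0, the hash of 01:00:5e:00:00:01 is
         	 * (0x00 >> 4) | (0x01 << 4) = 0x010.
         	 */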
   4285 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4286 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4287 
   4288 	return (hash & 0xfff);
   4289 }
   4290 
    4291 /*
    4292  * wm_rar_count:
    4293  *	Return the number of receive address list entries for the chip.
    4294  */
   4295 static int
   4296 wm_rar_count(struct wm_softc *sc)
   4297 {
   4298 	int size;
   4299 
   4300 	switch (sc->sc_type) {
   4301 	case WM_T_ICH8:
    4302 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4303 		break;
   4304 	case WM_T_ICH9:
   4305 	case WM_T_ICH10:
   4306 	case WM_T_PCH:
   4307 		size = WM_RAL_TABSIZE_ICH8;
   4308 		break;
   4309 	case WM_T_PCH2:
   4310 		size = WM_RAL_TABSIZE_PCH2;
   4311 		break;
   4312 	case WM_T_PCH_LPT:
   4313 	case WM_T_PCH_SPT:
   4314 	case WM_T_PCH_CNP:
   4315 		size = WM_RAL_TABSIZE_PCH_LPT;
   4316 		break;
   4317 	case WM_T_82575:
   4318 	case WM_T_I210:
   4319 	case WM_T_I211:
   4320 		size = WM_RAL_TABSIZE_82575;
   4321 		break;
   4322 	case WM_T_82576:
   4323 	case WM_T_82580:
   4324 		size = WM_RAL_TABSIZE_82576;
   4325 		break;
   4326 	case WM_T_I350:
   4327 	case WM_T_I354:
   4328 		size = WM_RAL_TABSIZE_I350;
   4329 		break;
   4330 	default:
   4331 		size = WM_RAL_TABSIZE;
   4332 	}
   4333 
   4334 	return size;
   4335 }
   4336 
   4337 /*
   4338  * wm_set_filter:
   4339  *
   4340  *	Set up the receive filter.
   4341  */
   4342 static void
   4343 wm_set_filter(struct wm_softc *sc)
   4344 {
   4345 	struct ethercom *ec = &sc->sc_ethercom;
   4346 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4347 	struct ether_multi *enm;
   4348 	struct ether_multistep step;
   4349 	bus_addr_t mta_reg;
   4350 	uint32_t hash, reg, bit;
   4351 	int i, size, ralmax, rv;
   4352 
   4353 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4354 		device_xname(sc->sc_dev), __func__));
   4355 	KASSERT(mutex_owned(sc->sc_core_lock));
   4356 
   4357 	if (sc->sc_type >= WM_T_82544)
   4358 		mta_reg = WMREG_CORDOVA_MTA;
   4359 	else
   4360 		mta_reg = WMREG_MTA;
   4361 
   4362 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4363 
   4364 	if (sc->sc_if_flags & IFF_BROADCAST)
   4365 		sc->sc_rctl |= RCTL_BAM;
   4366 	if (sc->sc_if_flags & IFF_PROMISC) {
   4367 		sc->sc_rctl |= RCTL_UPE;
   4368 		ETHER_LOCK(ec);
   4369 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4370 		ETHER_UNLOCK(ec);
   4371 		goto allmulti;
   4372 	}
   4373 
   4374 	/*
   4375 	 * Set the station address in the first RAL slot, and
   4376 	 * clear the remaining slots.
   4377 	 */
   4378 	size = wm_rar_count(sc);
   4379 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4380 
   4381 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4382 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4383 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4384 		switch (i) {
   4385 		case 0:
   4386 			/* We can use all entries */
   4387 			ralmax = size;
   4388 			break;
   4389 		case 1:
   4390 			/* Only RAR[0] */
   4391 			ralmax = 1;
   4392 			break;
   4393 		default:
   4394 			/* Available SHRA + RAR[0] */
   4395 			ralmax = i + 1;
   4396 		}
   4397 	} else
   4398 		ralmax = size;
   4399 	for (i = 1; i < size; i++) {
   4400 		if (i < ralmax)
   4401 			wm_set_ral(sc, NULL, i);
   4402 	}
   4403 
   4404 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4405 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4406 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4407 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4408 		size = WM_ICH8_MC_TABSIZE;
   4409 	else
   4410 		size = WM_MC_TABSIZE;
   4411 	/* Clear out the multicast table. */
   4412 	for (i = 0; i < size; i++) {
   4413 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4414 		CSR_WRITE_FLUSH(sc);
   4415 	}
   4416 
   4417 	ETHER_LOCK(ec);
   4418 	ETHER_FIRST_MULTI(step, ec, enm);
   4419 	while (enm != NULL) {
   4420 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4421 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4422 			ETHER_UNLOCK(ec);
   4423 			/*
   4424 			 * We must listen to a range of multicast addresses.
   4425 			 * For now, just accept all multicasts, rather than
   4426 			 * trying to set only those filter bits needed to match
   4427 			 * the range.  (At this time, the only use of address
   4428 			 * ranges is for IP multicast routing, for which the
   4429 			 * range is big enough to require all bits set.)
   4430 			 */
   4431 			goto allmulti;
   4432 		}
   4433 
   4434 		hash = wm_mchash(sc, enm->enm_addrlo);
   4435 
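         		/*
         		 * The MTA is an array of 32-bit registers: the upper
         		 * bits of the hash select the register and the low
         		 * five bits select the bit within it.
         		 */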
   4436 		reg = (hash >> 5);
   4437 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4438 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4439 		    || (sc->sc_type == WM_T_PCH2)
   4440 		    || (sc->sc_type == WM_T_PCH_LPT)
   4441 		    || (sc->sc_type == WM_T_PCH_SPT)
   4442 		    || (sc->sc_type == WM_T_PCH_CNP))
   4443 			reg &= 0x1f;
   4444 		else
   4445 			reg &= 0x7f;
   4446 		bit = hash & 0x1f;
   4447 
   4448 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4449 		hash |= 1U << bit;
   4450 
   4451 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
    4452 			/*
    4453 			 * 82544 Errata 9: Certain registers (FCAH, MTA and
    4454 			 * VFTA) cannot be written with particular alignments
    4455 			 * in PCI-X operation; rewrite the even neighbor too.
    4456 			 */
   4457 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4458 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4459 			CSR_WRITE_FLUSH(sc);
   4460 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4461 			CSR_WRITE_FLUSH(sc);
   4462 		} else {
   4463 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4464 			CSR_WRITE_FLUSH(sc);
   4465 		}
   4466 
   4467 		ETHER_NEXT_MULTI(step, enm);
   4468 	}
   4469 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4470 	ETHER_UNLOCK(ec);
   4471 
   4472 	goto setit;
   4473 
   4474 allmulti:
   4475 	sc->sc_rctl |= RCTL_MPE;
   4476 
   4477 setit:
   4478 	if (sc->sc_type >= WM_T_PCH2) {
   4479 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4480 		    && (ifp->if_mtu > ETHERMTU))
   4481 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4482 		else
   4483 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4484 		if (rv != 0)
   4485 			device_printf(sc->sc_dev,
   4486 			    "Failed to do workaround for jumbo frame.\n");
   4487 	}
   4488 
   4489 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4490 }
   4491 
   4492 /* Reset and init related */
   4493 
   4494 static void
   4495 wm_set_vlan(struct wm_softc *sc)
   4496 {
   4497 
   4498 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4499 		device_xname(sc->sc_dev), __func__));
   4500 
   4501 	/* Deal with VLAN enables. */
   4502 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4503 		sc->sc_ctrl |= CTRL_VME;
   4504 	else
   4505 		sc->sc_ctrl &= ~CTRL_VME;
   4506 
   4507 	/* Write the control registers. */
   4508 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4509 }
   4510 
   4511 static void
   4512 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4513 {
   4514 	uint32_t gcr;
   4515 	pcireg_t ctrl2;
   4516 
   4517 	gcr = CSR_READ(sc, WMREG_GCR);
   4518 
   4519 	/* Only take action if timeout value is defaulted to 0 */
   4520 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4521 		goto out;
   4522 
   4523 	if ((gcr & GCR_CAP_VER2) == 0) {
   4524 		gcr |= GCR_CMPL_TMOUT_10MS;
   4525 		goto out;
   4526 	}
   4527 
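         	/*
         	 * Devices advertising the v2 capability take the timeout from
         	 * the PCIe Device Control 2 register instead of GCR; request
         	 * 16ms there.
         	 */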
   4528 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4529 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4530 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4531 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4532 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4533 
   4534 out:
   4535 	/* Disable completion timeout resend */
   4536 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4537 
   4538 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4539 }
   4540 
   4541 void
   4542 wm_get_auto_rd_done(struct wm_softc *sc)
   4543 {
   4544 	int i;
   4545 
   4546 	/* wait for eeprom to reload */
   4547 	switch (sc->sc_type) {
   4548 	case WM_T_82571:
   4549 	case WM_T_82572:
   4550 	case WM_T_82573:
   4551 	case WM_T_82574:
   4552 	case WM_T_82583:
   4553 	case WM_T_82575:
   4554 	case WM_T_82576:
   4555 	case WM_T_82580:
   4556 	case WM_T_I350:
   4557 	case WM_T_I354:
   4558 	case WM_T_I210:
   4559 	case WM_T_I211:
   4560 	case WM_T_80003:
   4561 	case WM_T_ICH8:
   4562 	case WM_T_ICH9:
   4563 		for (i = 0; i < 10; i++) {
   4564 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4565 				break;
   4566 			delay(1000);
   4567 		}
   4568 		if (i == 10) {
   4569 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4570 			    "complete\n", device_xname(sc->sc_dev));
   4571 		}
   4572 		break;
   4573 	default:
   4574 		break;
   4575 	}
   4576 }
   4577 
   4578 void
   4579 wm_lan_init_done(struct wm_softc *sc)
   4580 {
   4581 	uint32_t reg = 0;
   4582 	int i;
   4583 
   4584 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4585 		device_xname(sc->sc_dev), __func__));
   4586 
   4587 	/* Wait for eeprom to reload */
   4588 	switch (sc->sc_type) {
   4589 	case WM_T_ICH10:
   4590 	case WM_T_PCH:
   4591 	case WM_T_PCH2:
   4592 	case WM_T_PCH_LPT:
   4593 	case WM_T_PCH_SPT:
   4594 	case WM_T_PCH_CNP:
   4595 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4596 			reg = CSR_READ(sc, WMREG_STATUS);
   4597 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4598 				break;
   4599 			delay(100);
   4600 		}
   4601 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4602 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4603 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4604 		}
   4605 		break;
   4606 	default:
   4607 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4608 		    __func__);
   4609 		break;
   4610 	}
   4611 
   4612 	reg &= ~STATUS_LAN_INIT_DONE;
   4613 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4614 }
   4615 
   4616 void
   4617 wm_get_cfg_done(struct wm_softc *sc)
   4618 {
   4619 	int mask;
   4620 	uint32_t reg;
   4621 	int i;
   4622 
   4623 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4624 		device_xname(sc->sc_dev), __func__));
   4625 
   4626 	/* Wait for eeprom to reload */
   4627 	switch (sc->sc_type) {
   4628 	case WM_T_82542_2_0:
   4629 	case WM_T_82542_2_1:
   4630 		/* null */
   4631 		break;
   4632 	case WM_T_82543:
   4633 	case WM_T_82544:
   4634 	case WM_T_82540:
   4635 	case WM_T_82545:
   4636 	case WM_T_82545_3:
   4637 	case WM_T_82546:
   4638 	case WM_T_82546_3:
   4639 	case WM_T_82541:
   4640 	case WM_T_82541_2:
   4641 	case WM_T_82547:
   4642 	case WM_T_82547_2:
   4643 	case WM_T_82573:
   4644 	case WM_T_82574:
   4645 	case WM_T_82583:
   4646 		/* generic */
   4647 		delay(10*1000);
   4648 		break;
   4649 	case WM_T_80003:
   4650 	case WM_T_82571:
   4651 	case WM_T_82572:
   4652 	case WM_T_82575:
   4653 	case WM_T_82576:
   4654 	case WM_T_82580:
   4655 	case WM_T_I350:
   4656 	case WM_T_I354:
   4657 	case WM_T_I210:
   4658 	case WM_T_I211:
   4659 		if (sc->sc_type == WM_T_82571) {
    4660 			/* On 82571, both ports share CFGDONE_0 */
   4661 			mask = EEMNGCTL_CFGDONE_0;
   4662 		} else
   4663 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4664 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4665 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4666 				break;
   4667 			delay(1000);
   4668 		}
   4669 		if (i >= WM_PHY_CFG_TIMEOUT)
   4670 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4671 				device_xname(sc->sc_dev), __func__));
   4672 		break;
   4673 	case WM_T_ICH8:
   4674 	case WM_T_ICH9:
   4675 	case WM_T_ICH10:
   4676 	case WM_T_PCH:
   4677 	case WM_T_PCH2:
   4678 	case WM_T_PCH_LPT:
   4679 	case WM_T_PCH_SPT:
   4680 	case WM_T_PCH_CNP:
   4681 		delay(10*1000);
   4682 		if (sc->sc_type >= WM_T_ICH10)
   4683 			wm_lan_init_done(sc);
   4684 		else
   4685 			wm_get_auto_rd_done(sc);
   4686 
   4687 		/* Clear PHY Reset Asserted bit */
   4688 		reg = CSR_READ(sc, WMREG_STATUS);
   4689 		if ((reg & STATUS_PHYRA) != 0)
   4690 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4691 		break;
   4692 	default:
   4693 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4694 		    __func__);
   4695 		break;
   4696 	}
   4697 }
   4698 
   4699 int
   4700 wm_phy_post_reset(struct wm_softc *sc)
   4701 {
   4702 	device_t dev = sc->sc_dev;
   4703 	uint16_t reg;
   4704 	int rv = 0;
   4705 
   4706 	/* This function is only for ICH8 and newer. */
   4707 	if (sc->sc_type < WM_T_ICH8)
   4708 		return 0;
   4709 
   4710 	if (wm_phy_resetisblocked(sc)) {
   4711 		/* XXX */
   4712 		device_printf(dev, "PHY is blocked\n");
   4713 		return -1;
   4714 	}
   4715 
   4716 	/* Allow time for h/w to get to quiescent state after reset */
   4717 	delay(10*1000);
   4718 
   4719 	/* Perform any necessary post-reset workarounds */
   4720 	if (sc->sc_type == WM_T_PCH)
   4721 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4722 	else if (sc->sc_type == WM_T_PCH2)
   4723 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4724 	if (rv != 0)
   4725 		return rv;
   4726 
   4727 	/* Clear the host wakeup bit after lcd reset */
   4728 	if (sc->sc_type >= WM_T_PCH) {
   4729 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4730 		reg &= ~BM_WUC_HOST_WU_BIT;
   4731 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4732 	}
   4733 
   4734 	/* Configure the LCD with the extended configuration region in NVM */
   4735 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4736 		return rv;
   4737 
   4738 	/* Configure the LCD with the OEM bits in NVM */
   4739 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4740 
   4741 	if (sc->sc_type == WM_T_PCH2) {
   4742 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4743 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4744 			delay(10 * 1000);
   4745 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4746 		}
   4747 		/* Set EEE LPI Update Timer to 200usec */
   4748 		rv = sc->phy.acquire(sc);
   4749 		if (rv)
   4750 			return rv;
   4751 		rv = wm_write_emi_reg_locked(dev,
   4752 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4753 		sc->phy.release(sc);
   4754 	}
   4755 
   4756 	return rv;
   4757 }
   4758 
   4759 /* Only for PCH and newer */
   4760 static int
   4761 wm_write_smbus_addr(struct wm_softc *sc)
   4762 {
   4763 	uint32_t strap, freq;
   4764 	uint16_t phy_data;
   4765 	int rv;
   4766 
   4767 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4768 		device_xname(sc->sc_dev), __func__));
   4769 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4770 
   4771 	strap = CSR_READ(sc, WMREG_STRAP);
   4772 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4773 
   4774 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4775 	if (rv != 0)
   4776 		return rv;
   4777 
   4778 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4779 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4780 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4781 
   4782 	if (sc->sc_phytype == WMPHY_I217) {
   4783 		/* Restore SMBus frequency */
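         		/*
         		 * A strap value of 0 means the frequency is not
         		 * supported; after the post-decrement test, bit 0 and
         		 * bit 1 of the remaining value select the LOW and HIGH
         		 * frequency bits.
         		 */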
    4784 		if (freq--) {
   4785 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4786 			    | HV_SMB_ADDR_FREQ_HIGH);
   4787 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4788 			    HV_SMB_ADDR_FREQ_LOW);
   4789 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4790 			    HV_SMB_ADDR_FREQ_HIGH);
   4791 		} else
   4792 			DPRINTF(sc, WM_DEBUG_INIT,
   4793 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4794 				device_xname(sc->sc_dev), __func__));
   4795 	}
   4796 
   4797 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4798 	    phy_data);
   4799 }
   4800 
   4801 static int
   4802 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4803 {
   4804 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4805 	uint16_t phy_page = 0;
   4806 	int rv = 0;
   4807 
   4808 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4809 		device_xname(sc->sc_dev), __func__));
   4810 
   4811 	switch (sc->sc_type) {
   4812 	case WM_T_ICH8:
   4813 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4814 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4815 			return 0;
   4816 
   4817 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4818 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4819 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4820 			break;
   4821 		}
   4822 		/* FALLTHROUGH */
   4823 	case WM_T_PCH:
   4824 	case WM_T_PCH2:
   4825 	case WM_T_PCH_LPT:
   4826 	case WM_T_PCH_SPT:
   4827 	case WM_T_PCH_CNP:
   4828 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4829 		break;
   4830 	default:
   4831 		return 0;
   4832 	}
   4833 
   4834 	if ((rv = sc->phy.acquire(sc)) != 0)
   4835 		return rv;
   4836 
   4837 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4838 	if ((reg & sw_cfg_mask) == 0)
   4839 		goto release;
   4840 
   4841 	/*
   4842 	 * Make sure HW does not configure LCD from PHY extended configuration
   4843 	 * before SW configuration
   4844 	 */
   4845 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4846 	if ((sc->sc_type < WM_T_PCH2)
   4847 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4848 		goto release;
   4849 
   4850 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4851 		device_xname(sc->sc_dev), __func__));
   4852 	/* word_addr is in DWORD */
   4853 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4854 
   4855 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4856 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4857 	if (cnf_size == 0)
   4858 		goto release;
   4859 
   4860 	if (((sc->sc_type == WM_T_PCH)
   4861 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4862 	    || (sc->sc_type > WM_T_PCH)) {
   4863 		/*
   4864 		 * HW configures the SMBus address and LEDs when the OEM and
   4865 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4866 		 * are cleared, SW will configure them instead.
   4867 		 */
   4868 		DPRINTF(sc, WM_DEBUG_INIT,
   4869 		    ("%s: %s: Configure SMBus and LED\n",
   4870 			device_xname(sc->sc_dev), __func__));
   4871 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4872 			goto release;
   4873 
   4874 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4875 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4876 		    (uint16_t)reg);
   4877 		if (rv != 0)
   4878 			goto release;
   4879 	}
   4880 
   4881 	/* Configure LCD from extended configuration region. */
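         	/*
         	 * Each entry in the region is a (data, register address) word
         	 * pair. A write to IGPHY_PAGE_SELECT records the page so that
         	 * subsequent register addresses are taken within that page.
         	 */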
   4882 	for (i = 0; i < cnf_size; i++) {
   4883 		uint16_t reg_data, reg_addr;
   4884 
   4885 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4886 			goto release;
   4887 
    4888 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4889 			goto release;
   4890 
   4891 		if (reg_addr == IGPHY_PAGE_SELECT)
   4892 			phy_page = reg_data;
   4893 
   4894 		reg_addr &= IGPHY_MAXREGADDR;
   4895 		reg_addr |= phy_page;
   4896 
   4897 		KASSERT(sc->phy.writereg_locked != NULL);
   4898 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4899 		    reg_data);
   4900 	}
   4901 
   4902 release:
   4903 	sc->phy.release(sc);
   4904 	return rv;
   4905 }
   4906 
   4907 /*
   4908  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4909  *  @sc:       pointer to the HW structure
    4910  *  @d0_state: true when entering D0, false when entering D3
   4911  *
   4912  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4913  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4914  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4915  */
   4916 int
   4917 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4918 {
   4919 	uint32_t mac_reg;
   4920 	uint16_t oem_reg;
   4921 	int rv;
   4922 
   4923 	if (sc->sc_type < WM_T_PCH)
   4924 		return 0;
   4925 
   4926 	rv = sc->phy.acquire(sc);
   4927 	if (rv != 0)
   4928 		return rv;
   4929 
   4930 	if (sc->sc_type == WM_T_PCH) {
   4931 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4932 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4933 			goto release;
   4934 	}
   4935 
   4936 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4937 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4938 		goto release;
   4939 
   4940 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4941 
   4942 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4943 	if (rv != 0)
   4944 		goto release;
   4945 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4946 
   4947 	if (d0_state) {
   4948 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4949 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4950 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4951 			oem_reg |= HV_OEM_BITS_LPLU;
   4952 	} else {
   4953 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4954 		    != 0)
   4955 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4956 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4957 		    != 0)
   4958 			oem_reg |= HV_OEM_BITS_LPLU;
   4959 	}
   4960 
   4961 	/* Set Restart auto-neg to activate the bits */
   4962 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4963 	    && (wm_phy_resetisblocked(sc) == false))
   4964 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4965 
   4966 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4967 
   4968 release:
   4969 	sc->phy.release(sc);
   4970 
   4971 	return rv;
   4972 }
   4973 
   4974 /* Init hardware bits */
   4975 void
   4976 wm_initialize_hardware_bits(struct wm_softc *sc)
   4977 {
   4978 	uint32_t tarc0, tarc1, reg;
   4979 
   4980 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4981 		device_xname(sc->sc_dev), __func__));
   4982 
   4983 	/* For 82571 variant, 80003 and ICHs */
   4984 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4985 	    || (sc->sc_type >= WM_T_80003)) {
   4986 
   4987 		/* Transmit Descriptor Control 0 */
   4988 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4989 		reg |= TXDCTL_COUNT_DESC;
   4990 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4991 
   4992 		/* Transmit Descriptor Control 1 */
   4993 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4994 		reg |= TXDCTL_COUNT_DESC;
   4995 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4996 
   4997 		/* TARC0 */
   4998 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4999 		switch (sc->sc_type) {
   5000 		case WM_T_82571:
   5001 		case WM_T_82572:
   5002 		case WM_T_82573:
   5003 		case WM_T_82574:
   5004 		case WM_T_82583:
   5005 		case WM_T_80003:
   5006 			/* Clear bits 30..27 */
   5007 			tarc0 &= ~__BITS(30, 27);
   5008 			break;
   5009 		default:
   5010 			break;
   5011 		}
   5012 
   5013 		switch (sc->sc_type) {
   5014 		case WM_T_82571:
   5015 		case WM_T_82572:
   5016 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5017 
   5018 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5019 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5020 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5021 			/* 8257[12] Errata No.7 */
    5022 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5023 
   5024 			/* TARC1 bit 28 */
   5025 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5026 				tarc1 &= ~__BIT(28);
   5027 			else
   5028 				tarc1 |= __BIT(28);
   5029 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5030 
   5031 			/*
   5032 			 * 8257[12] Errata No.13
    5033 			 * Disable Dynamic Clock Gating.
   5034 			 */
   5035 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5036 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5037 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5038 			break;
   5039 		case WM_T_82573:
   5040 		case WM_T_82574:
   5041 		case WM_T_82583:
   5042 			if ((sc->sc_type == WM_T_82574)
   5043 			    || (sc->sc_type == WM_T_82583))
   5044 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5045 
   5046 			/* Extended Device Control */
   5047 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5048 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5049 			reg |= __BIT(22);	/* Set bit 22 */
   5050 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5051 
   5052 			/* Device Control */
   5053 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5054 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5055 
   5056 			/* PCIe Control Register */
   5057 			/*
   5058 			 * 82573 Errata (unknown).
   5059 			 *
   5060 			 * 82574 Errata 25 and 82583 Errata 12
   5061 			 * "Dropped Rx Packets":
   5062 			 *   NVM Image Version 2.1.4 and newer has no this bug.
    5063 			 *   NVM image 2.1.4 and newer do not have this bug.
   5064 			reg = CSR_READ(sc, WMREG_GCR);
   5065 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5066 			CSR_WRITE(sc, WMREG_GCR, reg);
   5067 
   5068 			if ((sc->sc_type == WM_T_82574)
   5069 			    || (sc->sc_type == WM_T_82583)) {
   5070 				/*
   5071 				 * Document says this bit must be set for
   5072 				 * proper operation.
   5073 				 */
   5074 				reg = CSR_READ(sc, WMREG_GCR);
   5075 				reg |= __BIT(22);
   5076 				CSR_WRITE(sc, WMREG_GCR, reg);
   5077 
   5078 				/*
    5079 				 * Apply a workaround for a hardware erratum
    5080 				 * documented in the errata docs. It fixes an
    5081 				 * issue where error-prone or unreliable PCIe
    5082 				 * completions occur, particularly with ASPM
    5083 				 * enabled. Without the fix, the issue can
    5084 				 * cause Tx timeouts.
   5085 				 */
   5086 				reg = CSR_READ(sc, WMREG_GCR2);
   5087 				reg |= __BIT(0);
   5088 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5089 			}
   5090 			break;
   5091 		case WM_T_80003:
   5092 			/* TARC0 */
   5093 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5094 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5095 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5096 
   5097 			/* TARC1 bit 28 */
   5098 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5099 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5100 				tarc1 &= ~__BIT(28);
   5101 			else
   5102 				tarc1 |= __BIT(28);
   5103 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5104 			break;
   5105 		case WM_T_ICH8:
   5106 		case WM_T_ICH9:
   5107 		case WM_T_ICH10:
   5108 		case WM_T_PCH:
   5109 		case WM_T_PCH2:
   5110 		case WM_T_PCH_LPT:
   5111 		case WM_T_PCH_SPT:
   5112 		case WM_T_PCH_CNP:
   5113 			/* TARC0 */
   5114 			if (sc->sc_type == WM_T_ICH8) {
   5115 				/* Set TARC0 bits 29 and 28 */
   5116 				tarc0 |= __BITS(29, 28);
   5117 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5118 				tarc0 |= __BIT(29);
   5119 				/*
    5120 				 * Drop bit 28. From Linux.
   5121 				 * See I218/I219 spec update
   5122 				 * "5. Buffer Overrun While the I219 is
   5123 				 * Processing DMA Transactions"
   5124 				 */
   5125 				tarc0 &= ~__BIT(28);
   5126 			}
   5127 			/* Set TARC0 bits 23,24,26,27 */
   5128 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5129 
   5130 			/* CTRL_EXT */
   5131 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5132 			reg |= __BIT(22);	/* Set bit 22 */
   5133 			/*
   5134 			 * Enable PHY low-power state when MAC is at D3
   5135 			 * w/o WoL
   5136 			 */
   5137 			if (sc->sc_type >= WM_T_PCH)
   5138 				reg |= CTRL_EXT_PHYPDEN;
   5139 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5140 
   5141 			/* TARC1 */
   5142 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5143 			/* bit 28 */
   5144 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5145 				tarc1 &= ~__BIT(28);
   5146 			else
   5147 				tarc1 |= __BIT(28);
   5148 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5149 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5150 
   5151 			/* Device Status */
   5152 			if (sc->sc_type == WM_T_ICH8) {
   5153 				reg = CSR_READ(sc, WMREG_STATUS);
   5154 				reg &= ~__BIT(31);
   5155 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5156 
   5157 			}
   5158 
   5159 			/* IOSFPC */
   5160 			if (sc->sc_type == WM_T_PCH_SPT) {
   5161 				reg = CSR_READ(sc, WMREG_IOSFPC);
   5162 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   5163 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5164 			}
   5165 			/*
   5166 			 * Work-around descriptor data corruption issue during
   5167 			 * NFS v2 UDP traffic, just disable the NFS filtering
   5168 			 * capability.
   5169 			 */
   5170 			reg = CSR_READ(sc, WMREG_RFCTL);
   5171 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5172 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5173 			break;
   5174 		default:
   5175 			break;
   5176 		}
   5177 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5178 
   5179 		switch (sc->sc_type) {
   5180 		case WM_T_82571:
   5181 		case WM_T_82572:
   5182 		case WM_T_82573:
   5183 		case WM_T_80003:
   5184 		case WM_T_ICH8:
   5185 			/*
   5186 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5187 			 * others to avoid RSS Hash Value bug.
   5188 			 */
   5189 			reg = CSR_READ(sc, WMREG_RFCTL);
    5190 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5191 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5192 			break;
   5193 		case WM_T_82574:
    5194 			/* Use the extended Rx descriptor. */
   5195 			reg = CSR_READ(sc, WMREG_RFCTL);
   5196 			reg |= WMREG_RFCTL_EXSTEN;
   5197 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5198 			break;
   5199 		default:
   5200 			break;
   5201 		}
   5202 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5203 		/*
   5204 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5205 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5206 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5207 		 * Correctly by the Device"
   5208 		 *
   5209 		 * I354(C2000) Errata AVR53:
   5210 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5211 		 * Hang"
   5212 		 */
   5213 		reg = CSR_READ(sc, WMREG_RFCTL);
   5214 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5215 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5216 	}
   5217 }
   5218 
   5219 static uint32_t
   5220 wm_rxpbs_adjust_82580(uint32_t val)
   5221 {
   5222 	uint32_t rv = 0;
   5223 
   5224 	if (val < __arraycount(wm_82580_rxpbs_table))
   5225 		rv = wm_82580_rxpbs_table[val];
   5226 
   5227 	return rv;
   5228 }
   5229 
   5230 /*
   5231  * wm_reset_phy:
   5232  *
   5233  *	generic PHY reset function.
   5234  *	Same as e1000_phy_hw_reset_generic()
   5235  */
   5236 static int
   5237 wm_reset_phy(struct wm_softc *sc)
   5238 {
   5239 	uint32_t reg;
   5240 	int rv;
   5241 
   5242 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5243 		device_xname(sc->sc_dev), __func__));
   5244 	if (wm_phy_resetisblocked(sc))
   5245 		return -1;
   5246 
   5247 	rv = sc->phy.acquire(sc);
   5248 	if (rv) {
   5249 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5250 		    __func__, rv);
   5251 		return rv;
   5252 	}
   5253 
   5254 	reg = CSR_READ(sc, WMREG_CTRL);
   5255 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5256 	CSR_WRITE_FLUSH(sc);
   5257 
   5258 	delay(sc->phy.reset_delay_us);
   5259 
   5260 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5261 	CSR_WRITE_FLUSH(sc);
   5262 
   5263 	delay(150);
   5264 
   5265 	sc->phy.release(sc);
   5266 
   5267 	wm_get_cfg_done(sc);
   5268 	wm_phy_post_reset(sc);
   5269 
   5270 	return 0;
   5271 }
   5272 
   5273 /*
   5274  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5275  *
   5276  * In i219, the descriptor rings must be emptied before resetting the HW
   5277  * or before changing the device state to D3 during runtime (runtime PM).
   5278  *
   5279  * Failure to do this will cause the HW to enter a unit hang state which can
   5280  * only be released by PCI reset on the device.
   5281  *
   5282  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5283  */
   5284 static void
   5285 wm_flush_desc_rings(struct wm_softc *sc)
   5286 {
   5287 	pcireg_t preg;
   5288 	uint32_t reg;
   5289 	struct wm_txqueue *txq;
   5290 	wiseman_txdesc_t *txd;
   5291 	int nexttx;
   5292 	uint32_t rctl;
   5293 
   5294 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5295 
   5296 	/* First, disable MULR fix in FEXTNVM11 */
   5297 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5298 	reg |= FEXTNVM11_DIS_MULRFIX;
   5299 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5300 
   5301 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5302 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5303 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5304 		return;
   5305 
   5306 	/*
   5307 	 * Remove all descriptors from the tx_ring.
   5308 	 *
   5309 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5310 	 * happens when the HW reads the regs. We assign the ring itself as
    5311 	 * the data of the next descriptor. We don't care about the data
    5312 	 * since we are about to reset the HW.
   5313 	 */
   5314 #ifdef WM_DEBUG
   5315 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5316 #endif
   5317 	reg = CSR_READ(sc, WMREG_TCTL);
   5318 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5319 
   5320 	txq = &sc->sc_queue[0].wmq_txq;
   5321 	nexttx = txq->txq_next;
   5322 	txd = &txq->txq_descs[nexttx];
   5323 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5324 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5325 	txd->wtx_fields.wtxu_status = 0;
   5326 	txd->wtx_fields.wtxu_options = 0;
   5327 	txd->wtx_fields.wtxu_vlan = 0;
   5328 
   5329 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5330 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5331 
   5332 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5333 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5334 	CSR_WRITE_FLUSH(sc);
   5335 	delay(250);
   5336 
   5337 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5338 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5339 		return;
   5340 
   5341 	/*
   5342 	 * Mark all descriptors in the RX ring as consumed and disable the
   5343 	 * rx ring.
   5344 	 */
   5345 #ifdef WM_DEBUG
   5346 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5347 #endif
   5348 	rctl = CSR_READ(sc, WMREG_RCTL);
   5349 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5350 	CSR_WRITE_FLUSH(sc);
   5351 	delay(150);
   5352 
   5353 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5354 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5355 	reg &= 0xffffc000;
   5356 	/*
   5357 	 * Update thresholds: prefetch threshold to 31, host threshold
   5358 	 * to 1 and make sure the granularity is "descriptors" and not
   5359 	 * "cache lines"
   5360 	 */
   5361 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5362 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5363 
   5364 	/* Momentarily enable the RX ring for the changes to take effect */
   5365 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5366 	CSR_WRITE_FLUSH(sc);
   5367 	delay(150);
   5368 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5369 }
   5370 
   5371 /*
   5372  * wm_reset:
   5373  *
   5374  *	Reset the i82542 chip.
   5375  */
   5376 static void
   5377 wm_reset(struct wm_softc *sc)
   5378 {
   5379 	int phy_reset = 0;
   5380 	int i, error = 0;
   5381 	uint32_t reg;
   5382 	uint16_t kmreg;
   5383 	int rv;
   5384 
   5385 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5386 		device_xname(sc->sc_dev), __func__));
   5387 	KASSERT(sc->sc_type != 0);
   5388 
   5389 	/*
   5390 	 * Allocate on-chip memory according to the MTU size.
   5391 	 * The Packet Buffer Allocation register must be written
   5392 	 * before the chip is reset.
   5393 	 */
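         	/*
         	 * The PBA value partitions the on-chip packet buffer, in
         	 * kilobytes, between receive and transmit; e.g. on the 82547,
         	 * PBA_30K leaves PBA_40K - PBA_30K = 10KB for the Tx FIFO.
         	 */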
   5394 	switch (sc->sc_type) {
   5395 	case WM_T_82547:
   5396 	case WM_T_82547_2:
   5397 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5398 		    PBA_22K : PBA_30K;
   5399 		for (i = 0; i < sc->sc_nqueues; i++) {
   5400 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5401 			txq->txq_fifo_head = 0;
   5402 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5403 			txq->txq_fifo_size =
   5404 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5405 			txq->txq_fifo_stall = 0;
   5406 		}
   5407 		break;
   5408 	case WM_T_82571:
   5409 	case WM_T_82572:
   5410 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   5411 	case WM_T_80003:
   5412 		sc->sc_pba = PBA_32K;
   5413 		break;
   5414 	case WM_T_82573:
   5415 		sc->sc_pba = PBA_12K;
   5416 		break;
   5417 	case WM_T_82574:
   5418 	case WM_T_82583:
   5419 		sc->sc_pba = PBA_20K;
   5420 		break;
   5421 	case WM_T_82576:
   5422 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5423 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5424 		break;
   5425 	case WM_T_82580:
   5426 	case WM_T_I350:
   5427 	case WM_T_I354:
   5428 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5429 		break;
   5430 	case WM_T_I210:
   5431 	case WM_T_I211:
   5432 		sc->sc_pba = PBA_34K;
   5433 		break;
   5434 	case WM_T_ICH8:
   5435 		/* Workaround for a bit corruption issue in FIFO memory */
   5436 		sc->sc_pba = PBA_8K;
   5437 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5438 		break;
   5439 	case WM_T_ICH9:
   5440 	case WM_T_ICH10:
   5441 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5442 		    PBA_14K : PBA_10K;
   5443 		break;
   5444 	case WM_T_PCH:
   5445 	case WM_T_PCH2:	/* XXX 14K? */
   5446 	case WM_T_PCH_LPT:
   5447 	case WM_T_PCH_SPT:
   5448 	case WM_T_PCH_CNP:
   5449 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5450 		    PBA_12K : PBA_26K;
   5451 		break;
   5452 	default:
   5453 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5454 		    PBA_40K : PBA_48K;
   5455 		break;
   5456 	}
   5457 	/*
    5458 	 * Only old or non-multiqueue devices have the PBA register.
   5459 	 * XXX Need special handling for 82575.
   5460 	 */
   5461 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5462 	    || (sc->sc_type == WM_T_82575))
   5463 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5464 
   5465 	/* Prevent the PCI-E bus from sticking */
   5466 	if (sc->sc_flags & WM_F_PCIE) {
   5467 		int timeout = 800;
   5468 
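         		/*
         		 * Request that the device stop initiating new master
         		 * transactions, then poll up to 80ms (800 * 100us) for
         		 * the GIO Master Enable status bit to clear.
         		 */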
   5469 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5470 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5471 
   5472 		while (timeout--) {
   5473 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5474 			    == 0)
   5475 				break;
   5476 			delay(100);
   5477 		}
   5478 		if (timeout == 0)
   5479 			device_printf(sc->sc_dev,
   5480 			    "failed to disable bus mastering\n");
   5481 	}
   5482 
   5483 	/* Set the completion timeout for interface */
   5484 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5485 	    || (sc->sc_type == WM_T_82580)
   5486 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5487 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5488 		wm_set_pcie_completion_timeout(sc);
   5489 
   5490 	/* Clear interrupt */
   5491 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5492 	if (wm_is_using_msix(sc)) {
   5493 		if (sc->sc_type != WM_T_82574) {
   5494 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5495 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5496 		} else
   5497 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5498 	}
   5499 
   5500 	/* Stop the transmit and receive processes. */
   5501 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5502 	sc->sc_rctl &= ~RCTL_EN;
   5503 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5504 	CSR_WRITE_FLUSH(sc);
   5505 
   5506 	/* XXX set_tbi_sbp_82543() */
   5507 
   5508 	delay(10*1000);
   5509 
   5510 	/* Must acquire the MDIO ownership before MAC reset */
   5511 	switch (sc->sc_type) {
   5512 	case WM_T_82573:
   5513 	case WM_T_82574:
   5514 	case WM_T_82583:
   5515 		error = wm_get_hw_semaphore_82573(sc);
   5516 		break;
   5517 	default:
   5518 		break;
   5519 	}
   5520 
   5521 	/*
   5522 	 * 82541 Errata 29? & 82547 Errata 28?
   5523 	 * See also the description about PHY_RST bit in CTRL register
   5524 	 * in 8254x_GBe_SDM.pdf.
   5525 	 */
   5526 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5527 		CSR_WRITE(sc, WMREG_CTRL,
   5528 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5529 		CSR_WRITE_FLUSH(sc);
   5530 		delay(5000);
   5531 	}
   5532 
   5533 	switch (sc->sc_type) {
   5534 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5535 	case WM_T_82541:
   5536 	case WM_T_82541_2:
   5537 	case WM_T_82547:
   5538 	case WM_T_82547_2:
   5539 		/*
   5540 		 * On some chipsets, a reset through a memory-mapped write
   5541 		 * cycle can cause the chip to reset before completing the
    5542 		 * write cycle. This causes a major headache that can be avoided
   5543 		 * by issuing the reset via indirect register writes through
   5544 		 * I/O space.
   5545 		 *
   5546 		 * So, if we successfully mapped the I/O BAR at attach time,
   5547 		 * use that. Otherwise, try our luck with a memory-mapped
   5548 		 * reset.
   5549 		 */
   5550 		if (sc->sc_flags & WM_F_IOH_VALID)
   5551 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5552 		else
   5553 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5554 		break;
   5555 	case WM_T_82545_3:
   5556 	case WM_T_82546_3:
   5557 		/* Use the shadow control register on these chips. */
   5558 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5559 		break;
   5560 	case WM_T_80003:
   5561 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5562 		if (sc->phy.acquire(sc) != 0)
   5563 			break;
   5564 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5565 		sc->phy.release(sc);
   5566 		break;
   5567 	case WM_T_ICH8:
   5568 	case WM_T_ICH9:
   5569 	case WM_T_ICH10:
   5570 	case WM_T_PCH:
   5571 	case WM_T_PCH2:
   5572 	case WM_T_PCH_LPT:
   5573 	case WM_T_PCH_SPT:
   5574 	case WM_T_PCH_CNP:
   5575 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5576 		if (wm_phy_resetisblocked(sc) == false) {
   5577 			/*
   5578 			 * Gate automatic PHY configuration by hardware on
   5579 			 * non-managed 82579
   5580 			 */
   5581 			if ((sc->sc_type == WM_T_PCH2)
   5582 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5583 				== 0))
   5584 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5585 
   5586 			reg |= CTRL_PHY_RESET;
   5587 			phy_reset = 1;
   5588 		} else
   5589 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5590 		if (sc->phy.acquire(sc) != 0)
   5591 			break;
   5592 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5593 		/* Don't insert a completion barrier after the reset write */
   5594 		delay(20*1000);
   5595 		/*
   5596 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5597 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5598 		 * only. See also wm_get_swflag_ich8lan().
   5599 		 */
   5600 		mutex_exit(sc->sc_ich_phymtx);
   5601 		break;
   5602 	case WM_T_82580:
   5603 	case WM_T_I350:
   5604 	case WM_T_I354:
   5605 	case WM_T_I210:
   5606 	case WM_T_I211:
   5607 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5608 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5609 			CSR_WRITE_FLUSH(sc);
   5610 		delay(5000);
   5611 		break;
   5612 	case WM_T_82542_2_0:
   5613 	case WM_T_82542_2_1:
   5614 	case WM_T_82543:
   5615 	case WM_T_82540:
   5616 	case WM_T_82545:
   5617 	case WM_T_82546:
   5618 	case WM_T_82571:
   5619 	case WM_T_82572:
   5620 	case WM_T_82573:
   5621 	case WM_T_82574:
   5622 	case WM_T_82575:
   5623 	case WM_T_82576:
   5624 	case WM_T_82583:
   5625 	default:
   5626 		/* Everything else can safely use the documented method. */
   5627 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5628 		break;
   5629 	}
   5630 
   5631 	/* Must release the MDIO ownership after MAC reset */
   5632 	switch (sc->sc_type) {
   5633 	case WM_T_82573:
   5634 	case WM_T_82574:
   5635 	case WM_T_82583:
   5636 		if (error == 0)
   5637 			wm_put_hw_semaphore_82573(sc);
   5638 		break;
   5639 	default:
   5640 		break;
   5641 	}
   5642 
   5643 	/* Set Phy Config Counter to 50msec */
   5644 	if (sc->sc_type == WM_T_PCH2) {
   5645 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5646 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5647 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5648 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5649 	}
   5650 
   5651 	if (phy_reset != 0)
   5652 		wm_get_cfg_done(sc);
   5653 
   5654 	/* Reload EEPROM */
   5655 	switch (sc->sc_type) {
   5656 	case WM_T_82542_2_0:
   5657 	case WM_T_82542_2_1:
   5658 	case WM_T_82543:
   5659 	case WM_T_82544:
   5660 		delay(10);
   5661 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5662 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5663 		CSR_WRITE_FLUSH(sc);
   5664 		delay(2000);
   5665 		break;
   5666 	case WM_T_82540:
   5667 	case WM_T_82545:
   5668 	case WM_T_82545_3:
   5669 	case WM_T_82546:
   5670 	case WM_T_82546_3:
   5671 		delay(5*1000);
   5672 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5673 		break;
   5674 	case WM_T_82541:
   5675 	case WM_T_82541_2:
   5676 	case WM_T_82547:
   5677 	case WM_T_82547_2:
   5678 		delay(20000);
   5679 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5680 		break;
   5681 	case WM_T_82571:
   5682 	case WM_T_82572:
   5683 	case WM_T_82573:
   5684 	case WM_T_82574:
   5685 	case WM_T_82583:
   5686 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5687 			delay(10);
   5688 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5689 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5690 			CSR_WRITE_FLUSH(sc);
   5691 		}
   5692 		/* check EECD_EE_AUTORD */
   5693 		wm_get_auto_rd_done(sc);
   5694 		/*
   5695 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5696 		 * is set.
   5697 		 */
   5698 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5699 		    || (sc->sc_type == WM_T_82583))
   5700 			delay(25*1000);
   5701 		break;
   5702 	case WM_T_82575:
   5703 	case WM_T_82576:
   5704 	case WM_T_82580:
   5705 	case WM_T_I350:
   5706 	case WM_T_I354:
   5707 	case WM_T_I210:
   5708 	case WM_T_I211:
   5709 	case WM_T_80003:
   5710 		/* check EECD_EE_AUTORD */
   5711 		wm_get_auto_rd_done(sc);
   5712 		break;
   5713 	case WM_T_ICH8:
   5714 	case WM_T_ICH9:
   5715 	case WM_T_ICH10:
   5716 	case WM_T_PCH:
   5717 	case WM_T_PCH2:
   5718 	case WM_T_PCH_LPT:
   5719 	case WM_T_PCH_SPT:
   5720 	case WM_T_PCH_CNP:
   5721 		break;
   5722 	default:
   5723 		panic("%s: unknown type\n", __func__);
   5724 	}
   5725 
   5726 	/* Check whether EEPROM is present or not */
   5727 	switch (sc->sc_type) {
   5728 	case WM_T_82575:
   5729 	case WM_T_82576:
   5730 	case WM_T_82580:
   5731 	case WM_T_I350:
   5732 	case WM_T_I354:
   5733 	case WM_T_ICH8:
   5734 	case WM_T_ICH9:
   5735 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5736 			/* Not found */
   5737 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5738 			if (sc->sc_type == WM_T_82575)
   5739 				wm_reset_init_script_82575(sc);
   5740 		}
   5741 		break;
   5742 	default:
   5743 		break;
   5744 	}
   5745 
   5746 	if (phy_reset != 0)
   5747 		wm_phy_post_reset(sc);
   5748 
   5749 	if ((sc->sc_type == WM_T_82580)
   5750 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5751 		/* Clear global device reset status bit */
   5752 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5753 	}
   5754 
   5755 	/* Clear any pending interrupt events. */
   5756 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5757 	reg = CSR_READ(sc, WMREG_ICR);
   5758 	if (wm_is_using_msix(sc)) {
   5759 		if (sc->sc_type != WM_T_82574) {
   5760 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5761 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5762 		} else
   5763 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5764 	}
   5765 
   5766 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5767 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5768 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5769 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5770 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5771 		reg |= KABGTXD_BGSQLBIAS;
   5772 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5773 	}
   5774 
   5775 	/* Reload sc_ctrl */
   5776 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5777 
   5778 	wm_set_eee(sc);
   5779 
   5780 	/*
   5781 	 * For PCH, this write will make sure that any noise will be detected
   5782 	 * as a CRC error and be dropped rather than show up as a bad packet
   5783 	 * to the DMA engine
   5784 	 */
   5785 	if (sc->sc_type == WM_T_PCH)
   5786 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5787 
   5788 	if (sc->sc_type >= WM_T_82544)
   5789 		CSR_WRITE(sc, WMREG_WUC, 0);
   5790 
   5791 	if (sc->sc_type < WM_T_82575)
   5792 		wm_disable_aspm(sc); /* Workaround for some chips */
   5793 
   5794 	wm_reset_mdicnfg_82580(sc);
   5795 
   5796 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5797 		wm_pll_workaround_i210(sc);
   5798 
   5799 	if (sc->sc_type == WM_T_80003) {
   5800 		/* Default to TRUE to enable the MDIC W/A */
   5801 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5802 
   5803 		rv = wm_kmrn_readreg(sc,
   5804 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5805 		if (rv == 0) {
   5806 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5807 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5808 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5809 			else
   5810 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5811 		}
   5812 	}
   5813 }
   5814 
   5815 /*
   5816  * wm_add_rxbuf:
   5817  *
    5818  *	Add a receive buffer to the indicated descriptor.
   5819  */
   5820 static int
   5821 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5822 {
   5823 	struct wm_softc *sc = rxq->rxq_sc;
   5824 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5825 	struct mbuf *m;
   5826 	int error;
   5827 
   5828 	KASSERT(mutex_owned(rxq->rxq_lock));
   5829 
   5830 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5831 	if (m == NULL)
   5832 		return ENOBUFS;
   5833 
   5834 	MCLGET(m, M_DONTWAIT);
   5835 	if ((m->m_flags & M_EXT) == 0) {
   5836 		m_freem(m);
   5837 		return ENOBUFS;
   5838 	}
   5839 
   5840 	if (rxs->rxs_mbuf != NULL)
   5841 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5842 
   5843 	rxs->rxs_mbuf = m;
   5844 
   5845 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5846 	/*
   5847 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5848 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5849 	 */
   5850 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5851 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5852 	if (error) {
   5853 		/* XXX XXX XXX */
   5854 		aprint_error_dev(sc->sc_dev,
   5855 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5856 		panic("wm_add_rxbuf");
   5857 	}
   5858 
   5859 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5860 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5861 
   5862 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5863 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5864 			wm_init_rxdesc(rxq, idx);
   5865 	} else
   5866 		wm_init_rxdesc(rxq, idx);
   5867 
   5868 	return 0;
   5869 }
   5870 
   5871 /*
   5872  * wm_rxdrain:
   5873  *
   5874  *	Drain the receive queue.
   5875  */
   5876 static void
   5877 wm_rxdrain(struct wm_rxqueue *rxq)
   5878 {
   5879 	struct wm_softc *sc = rxq->rxq_sc;
   5880 	struct wm_rxsoft *rxs;
   5881 	int i;
   5882 
   5883 	KASSERT(mutex_owned(rxq->rxq_lock));
   5884 
   5885 	for (i = 0; i < WM_NRXDESC; i++) {
   5886 		rxs = &rxq->rxq_soft[i];
   5887 		if (rxs->rxs_mbuf != NULL) {
   5888 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5889 			m_freem(rxs->rxs_mbuf);
   5890 			rxs->rxs_mbuf = NULL;
   5891 		}
   5892 	}
   5893 }
   5894 
   5895 /*
   5896  * Setup registers for RSS.
   5897  *
    5898  * XXX VMDq is not yet supported
   5899  */
   5900 static void
   5901 wm_init_rss(struct wm_softc *sc)
   5902 {
   5903 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5904 	int i;
   5905 
   5906 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5907 
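         	/*
         	 * Fill the redirection table round-robin: entry i steers its
         	 * hash bucket to queue (i % sc_nqueues), spreading flows
         	 * evenly across the active queues.
         	 */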
   5908 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5909 		unsigned int qid, reta_ent;
   5910 
   5911 		qid  = i % sc->sc_nqueues;
   5912 		switch (sc->sc_type) {
   5913 		case WM_T_82574:
   5914 			reta_ent = __SHIFTIN(qid,
   5915 			    RETA_ENT_QINDEX_MASK_82574);
   5916 			break;
   5917 		case WM_T_82575:
   5918 			reta_ent = __SHIFTIN(qid,
   5919 			    RETA_ENT_QINDEX1_MASK_82575);
   5920 			break;
   5921 		default:
   5922 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5923 			break;
   5924 		}
   5925 
   5926 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5927 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5928 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5929 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5930 	}
   5931 
   5932 	rss_getkey((uint8_t *)rss_key);
   5933 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5934 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5935 
   5936 	if (sc->sc_type == WM_T_82574)
   5937 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5938 	else
   5939 		mrqc = MRQC_ENABLE_RSS_MQ;
   5940 
   5941 	/*
    5942 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5943 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5944 	 */
   5945 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5946 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5947 #if 0
   5948 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5949 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5950 #endif
   5951 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5952 
   5953 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5954 }
   5955 
   5956 /*
    5957  * Adjust the TX and RX queue numbers which the system actually uses.
    5958  *
    5959  * The numbers are affected by the following parameters:
    5960  *     - The number of hardware queues
   5961  *     - The number of MSI-X vectors (= "nvectors" argument)
   5962  *     - ncpu
   5963  */
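         /*
          * Worked example (derived from the code below): an 82576 has 16
          * hardware queues, so with 5 MSI-X vectors (4 for Tx/Rx plus 1 for
          * link) and 8 CPUs, sc_nqueues = min(16, 5 - 1, 8) = 4.
          */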
   5964 static void
   5965 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5966 {
   5967 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5968 
   5969 	if (nvectors < 2) {
   5970 		sc->sc_nqueues = 1;
   5971 		return;
   5972 	}
   5973 
   5974 	switch (sc->sc_type) {
   5975 	case WM_T_82572:
   5976 		hw_ntxqueues = 2;
   5977 		hw_nrxqueues = 2;
   5978 		break;
   5979 	case WM_T_82574:
   5980 		hw_ntxqueues = 2;
   5981 		hw_nrxqueues = 2;
   5982 		break;
   5983 	case WM_T_82575:
   5984 		hw_ntxqueues = 4;
   5985 		hw_nrxqueues = 4;
   5986 		break;
   5987 	case WM_T_82576:
   5988 		hw_ntxqueues = 16;
   5989 		hw_nrxqueues = 16;
   5990 		break;
   5991 	case WM_T_82580:
   5992 	case WM_T_I350:
   5993 	case WM_T_I354:
   5994 		hw_ntxqueues = 8;
   5995 		hw_nrxqueues = 8;
   5996 		break;
   5997 	case WM_T_I210:
   5998 		hw_ntxqueues = 4;
   5999 		hw_nrxqueues = 4;
   6000 		break;
   6001 	case WM_T_I211:
   6002 		hw_ntxqueues = 2;
   6003 		hw_nrxqueues = 2;
   6004 		break;
   6005 		/*
    6006 		 * The Ethernet controllers below do not support MSI-X;
    6007 		 * this driver doesn't let them use multiqueue.
   6008 		 *     - WM_T_80003
   6009 		 *     - WM_T_ICH8
   6010 		 *     - WM_T_ICH9
   6011 		 *     - WM_T_ICH10
   6012 		 *     - WM_T_PCH
   6013 		 *     - WM_T_PCH2
   6014 		 *     - WM_T_PCH_LPT
   6015 		 */
   6016 	default:
   6017 		hw_ntxqueues = 1;
   6018 		hw_nrxqueues = 1;
   6019 		break;
   6020 	}
   6021 
   6022 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6023 
   6024 	/*
    6025 	 * Since using more queues than MSI-X vectors cannot improve scaling,
    6026 	 * we limit the number of queues actually used.
   6027 	 */
   6028 	if (nvectors < hw_nqueues + 1)
   6029 		sc->sc_nqueues = nvectors - 1;
   6030 	else
   6031 		sc->sc_nqueues = hw_nqueues;
   6032 
   6033 	/*
    6034 	 * Since using more queues than CPUs cannot improve scaling,
    6035 	 * we limit the number of queues actually used.
   6036 	 */
   6037 	if (ncpu < sc->sc_nqueues)
   6038 		sc->sc_nqueues = ncpu;
   6039 }
   6040 
   6041 static inline bool
   6042 wm_is_using_msix(struct wm_softc *sc)
   6043 {
   6044 
   6045 	return (sc->sc_nintrs > 1);
   6046 }
   6047 
   6048 static inline bool
   6049 wm_is_using_multiqueue(struct wm_softc *sc)
   6050 {
   6051 
   6052 	return (sc->sc_nqueues > 1);
   6053 }
   6054 
   6055 static int
   6056 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6057 {
   6058 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6059 
   6060 	wmq->wmq_id = qidx;
   6061 	wmq->wmq_intr_idx = intr_idx;
   6062 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6063 	    wm_handle_queue, wmq);
   6064 	if (wmq->wmq_si != NULL)
   6065 		return 0;
   6066 
   6067 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6068 	    wmq->wmq_id);
   6069 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6070 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6071 	return ENOMEM;
   6072 }
   6073 
   6074 /*
    6075  * Both single-interrupt MSI and INTx can use this function.
   6076  */
   6077 static int
   6078 wm_setup_legacy(struct wm_softc *sc)
   6079 {
   6080 	pci_chipset_tag_t pc = sc->sc_pc;
   6081 	const char *intrstr = NULL;
   6082 	char intrbuf[PCI_INTRSTR_LEN];
   6083 	int error;
   6084 
   6085 	error = wm_alloc_txrx_queues(sc);
   6086 	if (error) {
   6087 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6088 		    error);
   6089 		return ENOMEM;
   6090 	}
   6091 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6092 	    sizeof(intrbuf));
   6093 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6094 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6095 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6096 	if (sc->sc_ihs[0] == NULL) {
   6097 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6098 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6099 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6100 		return ENOMEM;
   6101 	}
   6102 
   6103 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6104 	sc->sc_nintrs = 1;
   6105 
   6106 	return wm_softint_establish_queue(sc, 0, 0);
   6107 }
   6108 
   6109 static int
   6110 wm_setup_msix(struct wm_softc *sc)
   6111 {
   6112 	void *vih;
   6113 	kcpuset_t *affinity;
   6114 	int qidx, error, intr_idx, txrx_established;
   6115 	pci_chipset_tag_t pc = sc->sc_pc;
   6116 	const char *intrstr = NULL;
   6117 	char intrbuf[PCI_INTRSTR_LEN];
   6118 	char intr_xname[INTRDEVNAMEBUF];
   6119 
   6120 	if (sc->sc_nqueues < ncpu) {
   6121 		/*
    6122 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6123 		 * interrupts starts at CPU#1.
   6124 		 */
   6125 		sc->sc_affinity_offset = 1;
   6126 	} else {
   6127 		/*
    6128 		 * In this case, this device uses all CPUs, so for readability we
    6129 		 * make the affinitized cpu_index equal to the MSI-X vector number.
   6130 		 */
   6131 		sc->sc_affinity_offset = 0;
   6132 	}
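         	/*
         	 * Example of the resulting placement (assuming 4 queues on an
         	 * 8-CPU machine): sc_affinity_offset == 1, so the round-robin
         	 * kcpuset_set() below binds the Tx/Rx vectors to CPU#1..CPU#4
         	 * while the LINK vector keeps the system default affinity.
         	 */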
   6133 
   6134 	error = wm_alloc_txrx_queues(sc);
   6135 	if (error) {
   6136 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6137 		    error);
   6138 		return ENOMEM;
   6139 	}
   6140 
   6141 	kcpuset_create(&affinity, false);
   6142 	intr_idx = 0;
   6143 
   6144 	/*
   6145 	 * TX and RX
   6146 	 */
   6147 	txrx_established = 0;
   6148 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6149 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6150 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6151 
   6152 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6153 		    sizeof(intrbuf));
   6154 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6155 		    PCI_INTR_MPSAFE, true);
   6156 		memset(intr_xname, 0, sizeof(intr_xname));
   6157 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6158 		    device_xname(sc->sc_dev), qidx);
   6159 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6160 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6161 		if (vih == NULL) {
   6162 			aprint_error_dev(sc->sc_dev,
   6163 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6164 			    intrstr ? " at " : "",
   6165 			    intrstr ? intrstr : "");
   6166 
   6167 			goto fail;
   6168 		}
   6169 		kcpuset_zero(affinity);
   6170 		/* Round-robin affinity */
   6171 		kcpuset_set(affinity, affinity_to);
   6172 		error = interrupt_distribute(vih, affinity, NULL);
   6173 		if (error == 0) {
   6174 			aprint_normal_dev(sc->sc_dev,
   6175 			    "for TX and RX interrupting at %s affinity to %u\n",
   6176 			    intrstr, affinity_to);
   6177 		} else {
   6178 			aprint_normal_dev(sc->sc_dev,
   6179 			    "for TX and RX interrupting at %s\n", intrstr);
   6180 		}
   6181 		sc->sc_ihs[intr_idx] = vih;
   6182 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6183 			goto fail;
   6184 		txrx_established++;
   6185 		intr_idx++;
   6186 	}
   6187 
   6188 	/* LINK */
   6189 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6190 	    sizeof(intrbuf));
   6191 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6192 	memset(intr_xname, 0, sizeof(intr_xname));
   6193 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6194 	    device_xname(sc->sc_dev));
   6195 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6196 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6197 	if (vih == NULL) {
   6198 		aprint_error_dev(sc->sc_dev,
   6199 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6200 		    intrstr ? " at " : "",
   6201 		    intrstr ? intrstr : "");
   6202 
   6203 		goto fail;
   6204 	}
    6205 	/* Keep the default affinity for the LINK interrupt. */
   6206 	aprint_normal_dev(sc->sc_dev,
   6207 	    "for LINK interrupting at %s\n", intrstr);
   6208 	sc->sc_ihs[intr_idx] = vih;
   6209 	sc->sc_link_intr_idx = intr_idx;
   6210 
   6211 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6212 	kcpuset_destroy(affinity);
   6213 	return 0;
   6214 
   6215 fail:
   6216 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6217 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6218 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6219 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6220 	}
   6221 
   6222 	kcpuset_destroy(affinity);
   6223 	return ENOMEM;
   6224 }
   6225 
   6226 static void
   6227 wm_unset_stopping_flags(struct wm_softc *sc)
   6228 {
   6229 	int i;
   6230 
   6231 	KASSERT(mutex_owned(sc->sc_core_lock));
   6232 
   6233 	/* Must unset stopping flags in ascending order. */
   6234 	for (i = 0; i < sc->sc_nqueues; i++) {
   6235 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6236 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6237 
   6238 		mutex_enter(txq->txq_lock);
   6239 		txq->txq_stopping = false;
   6240 		mutex_exit(txq->txq_lock);
   6241 
   6242 		mutex_enter(rxq->rxq_lock);
   6243 		rxq->rxq_stopping = false;
   6244 		mutex_exit(rxq->rxq_lock);
   6245 	}
   6246 
   6247 	sc->sc_core_stopping = false;
   6248 }
   6249 
   6250 static void
   6251 wm_set_stopping_flags(struct wm_softc *sc)
   6252 {
   6253 	int i;
   6254 
   6255 	KASSERT(mutex_owned(sc->sc_core_lock));
   6256 
   6257 	sc->sc_core_stopping = true;
   6258 
   6259 	/* Must set stopping flags in ascending order. */
   6260 	for (i = 0; i < sc->sc_nqueues; i++) {
   6261 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6262 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6263 
   6264 		mutex_enter(rxq->rxq_lock);
   6265 		rxq->rxq_stopping = true;
   6266 		mutex_exit(rxq->rxq_lock);
   6267 
   6268 		mutex_enter(txq->txq_lock);
   6269 		txq->txq_stopping = true;
   6270 		mutex_exit(txq->txq_lock);
   6271 	}
   6272 }
   6273 
   6274 /*
   6275  * Write interrupt interval value to ITR or EITR
   6276  */
   6277 static void
   6278 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6279 {
   6280 
   6281 	if (!wmq->wmq_set_itr)
   6282 		return;
   6283 
   6284 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6285 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6286 
   6287 		/*
    6288 		 * The 82575 doesn't have the CNT_INGR field,
    6289 		 * so overwrite the counter field in software.
   6290 		 */
   6291 		if (sc->sc_type == WM_T_82575)
   6292 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6293 			    EITR_COUNTER_MASK_82575);
   6294 		else
   6295 			eitr |= EITR_CNT_INGR;
   6296 
   6297 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6298 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6299 		/*
    6300 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6301 		 * the multiqueue function with MSI-X.
   6302 		 */
   6303 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6304 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6305 	} else {
   6306 		KASSERT(wmq->wmq_id == 0);
   6307 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6308 	}
   6309 
   6310 	wmq->wmq_set_itr = false;
   6311 }
   6312 
   6313 /*
   6314  * TODO
    6315  * The dynamic ITR calculation below is almost the same as Linux's igb,
    6316  * but it does not fit wm(4), so AIM stays disabled until we find an
    6317  * appropriate ITR calculation.
   6318  */
   6319 /*
    6320  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6321  * write. This function does not write the ITR/EITR register itself.
   6322  */
   6323 static void
   6324 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6325 {
   6326 #ifdef NOTYET
   6327 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6328 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6329 	uint32_t avg_size = 0;
   6330 	uint32_t new_itr;
   6331 
   6332 	if (rxq->rxq_packets)
   6333 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6334 	if (txq->txq_packets)
   6335 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6336 
   6337 	if (avg_size == 0) {
   6338 		new_itr = 450; /* restore default value */
   6339 		goto out;
   6340 	}
   6341 
   6342 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6343 	avg_size += 24;
   6344 
   6345 	/* Don't starve jumbo frames */
   6346 	avg_size = uimin(avg_size, 3000);
   6347 
   6348 	/* Give a little boost to mid-size frames */
   6349 	if ((avg_size > 300) && (avg_size < 1200))
   6350 		new_itr = avg_size / 3;
   6351 	else
   6352 		new_itr = avg_size / 2;
   6353 
   6354 out:
   6355 	/*
    6356 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   6357 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6358 	 */
   6359 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6360 		new_itr *= 4;
   6361 
   6362 	if (new_itr != wmq->wmq_itr) {
   6363 		wmq->wmq_itr = new_itr;
   6364 		wmq->wmq_set_itr = true;
   6365 	} else
   6366 		wmq->wmq_set_itr = false;
   6367 
   6368 	rxq->rxq_packets = 0;
   6369 	rxq->rxq_bytes = 0;
   6370 	txq->txq_packets = 0;
   6371 	txq->txq_bytes = 0;
   6372 #endif
   6373 }
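         /*
          * Illustrative trace of the disabled AIM path above: an average
          * packet of 1000 bytes becomes 1024 after the 24-byte overhead is
          * added, falls in the (300, 1200) mid-size boost range, and yields
          * new_itr = 1024 / 3 = 341, scaled by 4 afterwards for non-82575
          * controllers.
          */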
   6374 
   6375 static void
   6376 wm_init_sysctls(struct wm_softc *sc)
   6377 {
   6378 	struct sysctllog **log;
   6379 	const struct sysctlnode *rnode, *qnode, *cnode;
   6380 	int i, rv;
   6381 	const char *dvname;
   6382 
   6383 	log = &sc->sc_sysctllog;
   6384 	dvname = device_xname(sc->sc_dev);
   6385 
   6386 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6387 	    0, CTLTYPE_NODE, dvname,
   6388 	    SYSCTL_DESCR("wm information and settings"),
   6389 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6390 	if (rv != 0)
   6391 		goto err;
   6392 
   6393 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6394 	    CTLTYPE_BOOL, "txrx_workqueue",
   6395 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6396 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6397 	if (rv != 0)
   6398 		goto teardown;
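         	/*
         	 * The nodes created here live under hw.<ifname>, so (for example,
         	 * assuming a unit named wm0) packet processing can be switched to
         	 * a workqueue at runtime with:
         	 *
         	 *	sysctl -w hw.wm0.txrx_workqueue=1
         	 */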
   6399 
   6400 	for (i = 0; i < sc->sc_nqueues; i++) {
   6401 		struct wm_queue *wmq = &sc->sc_queue[i];
   6402 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6403 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6404 
   6405 		snprintf(sc->sc_queue[i].sysctlname,
   6406 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6407 
   6408 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6409 		    0, CTLTYPE_NODE,
   6410 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6411 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6412 			break;
   6413 
   6414 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6415 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6416 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6417 		    NULL, 0, &txq->txq_free,
   6418 		    0, CTL_CREATE, CTL_EOL) != 0)
   6419 			break;
   6420 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6421 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6422 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6423 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6424 		    0, CTL_CREATE, CTL_EOL) != 0)
   6425 			break;
   6426 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6427 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6428 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6429 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6430 		    0, CTL_CREATE, CTL_EOL) != 0)
   6431 			break;
   6432 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6433 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6434 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6435 		    NULL, 0, &txq->txq_next,
   6436 		    0, CTL_CREATE, CTL_EOL) != 0)
   6437 			break;
   6438 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6439 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6440 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6441 		    NULL, 0, &txq->txq_sfree,
   6442 		    0, CTL_CREATE, CTL_EOL) != 0)
   6443 			break;
   6444 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6445 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6446 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6447 		    NULL, 0, &txq->txq_snext,
   6448 		    0, CTL_CREATE, CTL_EOL) != 0)
   6449 			break;
   6450 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6451 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6452 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6453 		    NULL, 0, &txq->txq_sdirty,
   6454 		    0, CTL_CREATE, CTL_EOL) != 0)
   6455 			break;
   6456 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6457 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6458 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6459 		    NULL, 0, &txq->txq_flags,
   6460 		    0, CTL_CREATE, CTL_EOL) != 0)
   6461 			break;
   6462 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6463 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6464 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6465 		    NULL, 0, &txq->txq_stopping,
   6466 		    0, CTL_CREATE, CTL_EOL) != 0)
   6467 			break;
   6468 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6469 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6470 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6471 		    NULL, 0, &txq->txq_sending,
   6472 		    0, CTL_CREATE, CTL_EOL) != 0)
   6473 			break;
   6474 
   6475 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6476 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6477 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6478 		    NULL, 0, &rxq->rxq_ptr,
   6479 		    0, CTL_CREATE, CTL_EOL) != 0)
   6480 			break;
   6481 	}
   6482 
   6483 #ifdef WM_DEBUG
   6484 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6485 	    CTLTYPE_INT, "debug_flags",
   6486 	    SYSCTL_DESCR(
   6487 		    "Debug flags:\n"	\
   6488 		    "\t0x01 LINK\n"	\
   6489 		    "\t0x02 TX\n"	\
   6490 		    "\t0x04 RX\n"	\
   6491 		    "\t0x08 GMII\n"	\
   6492 		    "\t0x10 MANAGE\n"	\
   6493 		    "\t0x20 NVM\n"	\
   6494 		    "\t0x40 INIT\n"	\
   6495 		    "\t0x80 LOCK"),
   6496 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6497 	if (rv != 0)
   6498 		goto teardown;
   6499 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6500 	    CTLTYPE_BOOL, "trigger_reset",
   6501 	    SYSCTL_DESCR("Trigger an interface reset"),
   6502 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6503 	if (rv != 0)
   6504 		goto teardown;
   6505 #endif
   6506 
   6507 	return;
   6508 
   6509 teardown:
   6510 	sysctl_teardown(log);
   6511 err:
   6512 	sc->sc_sysctllog = NULL;
   6513 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6514 	    __func__, rv);
   6515 }
   6516 
   6517 /*
   6518  * wm_init:		[ifnet interface function]
   6519  *
   6520  *	Initialize the interface.
   6521  */
   6522 static int
   6523 wm_init(struct ifnet *ifp)
   6524 {
   6525 	struct wm_softc *sc = ifp->if_softc;
   6526 	int ret;
   6527 
   6528 	KASSERT(IFNET_LOCKED(ifp));
   6529 
   6530 	if (sc->sc_dying)
   6531 		return ENXIO;
   6532 
   6533 	mutex_enter(sc->sc_core_lock);
   6534 	ret = wm_init_locked(ifp);
   6535 	mutex_exit(sc->sc_core_lock);
   6536 
   6537 	return ret;
   6538 }
   6539 
   6540 static int
   6541 wm_init_locked(struct ifnet *ifp)
   6542 {
   6543 	struct wm_softc *sc = ifp->if_softc;
   6544 	struct ethercom *ec = &sc->sc_ethercom;
   6545 	int i, j, trynum, error = 0;
   6546 	uint32_t reg, sfp_mask = 0;
   6547 
   6548 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6549 		device_xname(sc->sc_dev), __func__));
   6550 	KASSERT(IFNET_LOCKED(ifp));
   6551 	KASSERT(mutex_owned(sc->sc_core_lock));
   6552 
   6553 	/*
    6554 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6555 	 * There is a small but measurable benefit to avoiding the adjustment
   6556 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6557 	 * on such platforms.  One possibility is that the DMA itself is
   6558 	 * slightly more efficient if the front of the entire packet (instead
   6559 	 * of the front of the headers) is aligned.
   6560 	 *
   6561 	 * Note we must always set align_tweak to 0 if we are using
   6562 	 * jumbo frames.
   6563 	 */
   6564 #ifdef __NO_STRICT_ALIGNMENT
   6565 	sc->sc_align_tweak = 0;
   6566 #else
   6567 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6568 		sc->sc_align_tweak = 0;
   6569 	else
   6570 		sc->sc_align_tweak = 2;
   6571 #endif /* __NO_STRICT_ALIGNMENT */
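         	/*
         	 * For example, with the standard MTU (and assuming MCLBYTES ==
         	 * 2048): 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 <=
         	 * 2046, so align_tweak becomes 2 and the IP header that follows
         	 * the 14-byte Ethernet header starts 4-byte aligned.
         	 */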
   6572 
   6573 	/* Cancel any pending I/O. */
   6574 	wm_stop_locked(ifp, false, false);
   6575 
   6576 	/* Update statistics before reset */
   6577 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6578 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6579 
   6580 	/* >= PCH_SPT hardware workaround before reset. */
   6581 	if (sc->sc_type >= WM_T_PCH_SPT)
   6582 		wm_flush_desc_rings(sc);
   6583 
   6584 	/* Reset the chip to a known state. */
   6585 	wm_reset(sc);
   6586 
   6587 	/*
   6588 	 * AMT based hardware can now take control from firmware
   6589 	 * Do this after reset.
   6590 	 */
   6591 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6592 		wm_get_hw_control(sc);
   6593 
   6594 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6595 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6596 		wm_legacy_irq_quirk_spt(sc);
   6597 
   6598 	/* Init hardware bits */
   6599 	wm_initialize_hardware_bits(sc);
   6600 
   6601 	/* Reset the PHY. */
   6602 	if (sc->sc_flags & WM_F_HAS_MII)
   6603 		wm_gmii_reset(sc);
   6604 
   6605 	if (sc->sc_type >= WM_T_ICH8) {
   6606 		reg = CSR_READ(sc, WMREG_GCR);
   6607 		/*
   6608 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6609 		 * default after reset.
   6610 		 */
   6611 		if (sc->sc_type == WM_T_ICH8)
   6612 			reg |= GCR_NO_SNOOP_ALL;
   6613 		else
   6614 			reg &= ~GCR_NO_SNOOP_ALL;
   6615 		CSR_WRITE(sc, WMREG_GCR, reg);
   6616 	}
   6617 
   6618 	if ((sc->sc_type >= WM_T_ICH8)
   6619 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6620 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6621 
   6622 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6623 		reg |= CTRL_EXT_RO_DIS;
   6624 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6625 	}
   6626 
   6627 	/* Calculate (E)ITR value */
   6628 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6629 		/*
    6630 		 * For the EITR of NEWQUEUE controllers (except the 82575).
    6631 		 * The 82575's EITR should be set to the same throttling value
    6632 		 * as other old controllers' ITR because the interrupts/sec
    6633 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6634 		 *
    6635 		 * The 82574's EITR should be set to the same throttling value as ITR.
   6636 		 *
   6637 		 * For N interrupts/sec, set this value to:
   6638 		 * 1,000,000 / N in contrast to ITR throttling value.
   6639 		 */
   6640 		sc->sc_itr_init = 450;
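         		/*
         		 * Plugging 450 into the formula in the comment above gives
         		 * roughly 1,000,000 / 450, i.e. about 2222 interrupts/sec
         		 * (an estimate derived from that formula, not a measurement).
         		 */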
   6641 	} else if (sc->sc_type >= WM_T_82543) {
   6642 		/*
   6643 		 * Set up the interrupt throttling register (units of 256ns)
   6644 		 * Note that a footnote in Intel's documentation says this
   6645 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6646 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6647 		 * that that is also true for the 1024ns units of the other
   6648 		 * interrupt-related timer registers -- so, really, we ought
   6649 		 * to divide this value by 4 when the link speed is low.
   6650 		 *
   6651 		 * XXX implement this division at link speed change!
   6652 		 */
   6653 
   6654 		/*
   6655 		 * For N interrupts/sec, set this value to:
   6656 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6657 		 * absolute and packet timer values to this value
   6658 		 * divided by 4 to get "simple timer" behavior.
   6659 		 */
   6660 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
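         		/*
         		 * Arithmetic check of the "2604 ints/sec" note above:
         		 * 1,000,000,000 / (2604 * 256) ~= 1500, i.e. an interval of
         		 * 1500 * 256ns = 384us between interrupts.
         		 */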
   6661 	}
   6662 
   6663 	error = wm_init_txrx_queues(sc);
   6664 	if (error)
   6665 		goto out;
   6666 
   6667 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6668 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6669 	    (sc->sc_type >= WM_T_82575))
   6670 		wm_serdes_power_up_link_82575(sc);
   6671 
   6672 	/* Clear out the VLAN table -- we don't use it (yet). */
   6673 	CSR_WRITE(sc, WMREG_VET, 0);
   6674 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6675 		trynum = 10; /* Due to hw errata */
   6676 	else
   6677 		trynum = 1;
   6678 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6679 		for (j = 0; j < trynum; j++)
   6680 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6681 
   6682 	/*
   6683 	 * Set up flow-control parameters.
   6684 	 *
   6685 	 * XXX Values could probably stand some tuning.
   6686 	 */
   6687 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6688 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6689 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6690 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6691 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6692 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6693 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6694 	}
   6695 
   6696 	sc->sc_fcrtl = FCRTL_DFLT;
   6697 	if (sc->sc_type < WM_T_82543) {
   6698 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6699 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6700 	} else {
   6701 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6702 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6703 	}
   6704 
   6705 	if (sc->sc_type == WM_T_80003)
   6706 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6707 	else
   6708 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6709 
   6710 	/* Writes the control register. */
   6711 	wm_set_vlan(sc);
   6712 
   6713 	if (sc->sc_flags & WM_F_HAS_MII) {
   6714 		uint16_t kmreg;
   6715 
   6716 		switch (sc->sc_type) {
   6717 		case WM_T_80003:
   6718 		case WM_T_ICH8:
   6719 		case WM_T_ICH9:
   6720 		case WM_T_ICH10:
   6721 		case WM_T_PCH:
   6722 		case WM_T_PCH2:
   6723 		case WM_T_PCH_LPT:
   6724 		case WM_T_PCH_SPT:
   6725 		case WM_T_PCH_CNP:
   6726 			/*
    6727 			 * Set the MAC to wait the maximum time between each
    6728 			 * iteration and to increase the maximum number of
    6729 			 * iterations when polling the PHY; this fixes
    6730 			 * erroneous timeouts at 10Mbps.
   6731 			 */
   6732 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6733 			    0xFFFF);
   6734 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6735 			    &kmreg);
   6736 			kmreg |= 0x3F;
   6737 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6738 			    kmreg);
   6739 			break;
   6740 		default:
   6741 			break;
   6742 		}
   6743 
   6744 		if (sc->sc_type == WM_T_80003) {
   6745 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6746 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6747 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6748 
   6749 			/* Bypass RX and TX FIFOs */
   6750 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6751 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6752 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6753 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6754 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6755 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6756 		}
   6757 	}
   6758 #if 0
   6759 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6760 #endif
   6761 
   6762 	/* Set up checksum offload parameters. */
   6763 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6764 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6765 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6766 		reg |= RXCSUM_IPOFL;
   6767 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6768 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6769 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6770 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6771 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6772 
   6773 	/* Set registers about MSI-X */
   6774 	if (wm_is_using_msix(sc)) {
   6775 		uint32_t ivar, qintr_idx;
   6776 		struct wm_queue *wmq;
   6777 		unsigned int qid;
   6778 
   6779 		if (sc->sc_type == WM_T_82575) {
   6780 			/* Interrupt control */
   6781 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6782 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6783 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6784 
   6785 			/* TX and RX */
   6786 			for (i = 0; i < sc->sc_nqueues; i++) {
   6787 				wmq = &sc->sc_queue[i];
   6788 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6789 				    EITR_TX_QUEUE(wmq->wmq_id)
   6790 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6791 			}
   6792 			/* Link status */
   6793 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6794 			    EITR_OTHER);
   6795 		} else if (sc->sc_type == WM_T_82574) {
   6796 			/* Interrupt control */
   6797 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6798 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6799 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6800 
   6801 			/*
   6802 			 * Work around issue with spurious interrupts
   6803 			 * in MSI-X mode.
    6804 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    6805 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6806 			 */
   6807 			reg = CSR_READ(sc, WMREG_RFCTL);
   6808 			reg |= WMREG_RFCTL_ACKDIS;
   6809 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6810 
   6811 			ivar = 0;
   6812 			/* TX and RX */
   6813 			for (i = 0; i < sc->sc_nqueues; i++) {
   6814 				wmq = &sc->sc_queue[i];
   6815 				qid = wmq->wmq_id;
   6816 				qintr_idx = wmq->wmq_intr_idx;
   6817 
   6818 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6819 				    IVAR_TX_MASK_Q_82574(qid));
   6820 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6821 				    IVAR_RX_MASK_Q_82574(qid));
   6822 			}
   6823 			/* Link status */
   6824 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6825 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6826 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6827 		} else {
   6828 			/* Interrupt control */
   6829 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6830 			    | GPIE_EIAME | GPIE_PBA);
   6831 
   6832 			switch (sc->sc_type) {
   6833 			case WM_T_82580:
   6834 			case WM_T_I350:
   6835 			case WM_T_I354:
   6836 			case WM_T_I210:
   6837 			case WM_T_I211:
   6838 				/* TX and RX */
   6839 				for (i = 0; i < sc->sc_nqueues; i++) {
   6840 					wmq = &sc->sc_queue[i];
   6841 					qid = wmq->wmq_id;
   6842 					qintr_idx = wmq->wmq_intr_idx;
   6843 
   6844 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6845 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6846 					ivar |= __SHIFTIN((qintr_idx
   6847 						| IVAR_VALID),
   6848 					    IVAR_TX_MASK_Q(qid));
   6849 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6850 					ivar |= __SHIFTIN((qintr_idx
   6851 						| IVAR_VALID),
   6852 					    IVAR_RX_MASK_Q(qid));
   6853 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6854 				}
   6855 				break;
   6856 			case WM_T_82576:
   6857 				/* TX and RX */
   6858 				for (i = 0; i < sc->sc_nqueues; i++) {
   6859 					wmq = &sc->sc_queue[i];
   6860 					qid = wmq->wmq_id;
   6861 					qintr_idx = wmq->wmq_intr_idx;
   6862 
   6863 					ivar = CSR_READ(sc,
   6864 					    WMREG_IVAR_Q_82576(qid));
   6865 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6866 					ivar |= __SHIFTIN((qintr_idx
   6867 						| IVAR_VALID),
   6868 					    IVAR_TX_MASK_Q_82576(qid));
   6869 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6870 					ivar |= __SHIFTIN((qintr_idx
   6871 						| IVAR_VALID),
   6872 					    IVAR_RX_MASK_Q_82576(qid));
   6873 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6874 					    ivar);
   6875 				}
   6876 				break;
   6877 			default:
   6878 				break;
   6879 			}
   6880 
   6881 			/* Link status */
   6882 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6883 			    IVAR_MISC_OTHER);
   6884 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6885 		}
   6886 
   6887 		if (wm_is_using_multiqueue(sc)) {
   6888 			wm_init_rss(sc);
   6889 
   6890 			/*
    6891 			** NOTE: Receive Full-Packet Checksum Offload
    6892 			** is mutually exclusive with Multiqueue. However,
    6893 			** this is not the same as TCP/IP checksums, which
    6894 			** still work.
   6895 			*/
   6896 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6897 			reg |= RXCSUM_PCSD;
   6898 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6899 		}
   6900 	}
   6901 
   6902 	/* Set up the interrupt registers. */
   6903 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6904 
   6905 	/* Enable SFP module insertion interrupt if it's required */
   6906 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6907 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6908 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6909 		sfp_mask = ICR_GPI(0);
   6910 	}
   6911 
   6912 	if (wm_is_using_msix(sc)) {
   6913 		uint32_t mask;
   6914 		struct wm_queue *wmq;
   6915 
   6916 		switch (sc->sc_type) {
   6917 		case WM_T_82574:
   6918 			mask = 0;
   6919 			for (i = 0; i < sc->sc_nqueues; i++) {
   6920 				wmq = &sc->sc_queue[i];
   6921 				mask |= ICR_TXQ(wmq->wmq_id);
   6922 				mask |= ICR_RXQ(wmq->wmq_id);
   6923 			}
   6924 			mask |= ICR_OTHER;
   6925 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6926 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6927 			break;
   6928 		default:
   6929 			if (sc->sc_type == WM_T_82575) {
   6930 				mask = 0;
   6931 				for (i = 0; i < sc->sc_nqueues; i++) {
   6932 					wmq = &sc->sc_queue[i];
   6933 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6934 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6935 				}
   6936 				mask |= EITR_OTHER;
   6937 			} else {
   6938 				mask = 0;
   6939 				for (i = 0; i < sc->sc_nqueues; i++) {
   6940 					wmq = &sc->sc_queue[i];
   6941 					mask |= 1 << wmq->wmq_intr_idx;
   6942 				}
   6943 				mask |= 1 << sc->sc_link_intr_idx;
   6944 			}
   6945 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6946 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6947 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6948 
   6949 			/* For other interrupts */
   6950 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6951 			break;
   6952 		}
   6953 	} else {
   6954 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6955 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6956 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6957 	}
   6958 
   6959 	/* Set up the inter-packet gap. */
   6960 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6961 
   6962 	if (sc->sc_type >= WM_T_82543) {
   6963 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6964 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6965 			wm_itrs_writereg(sc, wmq);
   6966 		}
   6967 		/*
    6968 		 * Link interrupts occur much less frequently than TX
    6969 		 * and RX interrupts, so we don't tune the
    6970 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6971 		 * FreeBSD's if_igb does.
   6972 		 */
   6973 	}
   6974 
   6975 	/* Set the VLAN EtherType. */
   6976 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6977 
   6978 	/*
   6979 	 * Set up the transmit control register; we start out with
   6980 	 * a collision distance suitable for FDX, but update it when
   6981 	 * we resolve the media type.
   6982 	 */
   6983 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6984 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6985 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6986 	if (sc->sc_type >= WM_T_82571)
   6987 		sc->sc_tctl |= TCTL_MULR;
   6988 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6989 
   6990 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6991 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6992 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6993 	}
   6994 
   6995 	if (sc->sc_type == WM_T_80003) {
   6996 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6997 		reg &= ~TCTL_EXT_GCEX_MASK;
   6998 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6999 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7000 	}
   7001 
   7002 	/* Set the media. */
   7003 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7004 		goto out;
   7005 
   7006 	/* Configure for OS presence */
   7007 	wm_init_manageability(sc);
   7008 
   7009 	/*
   7010 	 * Set up the receive control register; we actually program the
   7011 	 * register when we set the receive filter. Use multicast address
   7012 	 * offset type 0.
   7013 	 *
   7014 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7015 	 * don't enable that feature.
   7016 	 */
   7017 	sc->sc_mchash_type = 0;
   7018 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7019 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7020 
    7021 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7022 	if (sc->sc_type == WM_T_82574)
   7023 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7024 
   7025 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7026 		sc->sc_rctl |= RCTL_SECRC;
   7027 
   7028 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7029 	    && (ifp->if_mtu > ETHERMTU)) {
   7030 		sc->sc_rctl |= RCTL_LPE;
   7031 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7032 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7033 	}
   7034 
   7035 	if (MCLBYTES == 2048)
   7036 		sc->sc_rctl |= RCTL_2k;
   7037 	else {
   7038 		if (sc->sc_type >= WM_T_82543) {
   7039 			switch (MCLBYTES) {
   7040 			case 4096:
   7041 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7042 				break;
   7043 			case 8192:
   7044 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7045 				break;
   7046 			case 16384:
   7047 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7048 				break;
   7049 			default:
   7050 				panic("wm_init: MCLBYTES %d unsupported",
   7051 				    MCLBYTES);
   7052 				break;
   7053 			}
   7054 		} else
   7055 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7056 	}
   7057 
   7058 	/* Enable ECC */
   7059 	switch (sc->sc_type) {
   7060 	case WM_T_82571:
   7061 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7062 		reg |= PBA_ECC_CORR_EN;
   7063 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7064 		break;
   7065 	case WM_T_PCH_LPT:
   7066 	case WM_T_PCH_SPT:
   7067 	case WM_T_PCH_CNP:
   7068 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7069 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7070 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7071 
   7072 		sc->sc_ctrl |= CTRL_MEHE;
   7073 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7074 		break;
   7075 	default:
   7076 		break;
   7077 	}
   7078 
   7079 	/*
   7080 	 * Set the receive filter.
   7081 	 *
   7082 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7083 	 * the setting of RCTL.EN in wm_set_filter()
   7084 	 */
   7085 	wm_set_filter(sc);
   7086 
    7087 	/* On 82575 and later, set RDT only if RX is enabled. */
   7088 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7089 		int qidx;
   7090 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7091 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7092 			for (i = 0; i < WM_NRXDESC; i++) {
   7093 				mutex_enter(rxq->rxq_lock);
   7094 				wm_init_rxdesc(rxq, i);
   7095 				mutex_exit(rxq->rxq_lock);
   7096 
   7097 			}
   7098 		}
   7099 	}
   7100 
   7101 	wm_unset_stopping_flags(sc);
   7102 
   7103 	/* Start the one second link check clock. */
   7104 	callout_schedule(&sc->sc_tick_ch, hz);
   7105 
   7106 	/*
   7107 	 * ...all done! (IFNET_LOCKED asserted above.)
   7108 	 */
   7109 	ifp->if_flags |= IFF_RUNNING;
   7110 
   7111 out:
   7112 	/* Save last flags for the callback */
   7113 	sc->sc_if_flags = ifp->if_flags;
   7114 	sc->sc_ec_capenable = ec->ec_capenable;
   7115 	if (error)
   7116 		log(LOG_ERR, "%s: interface not running\n",
   7117 		    device_xname(sc->sc_dev));
   7118 	return error;
   7119 }
   7120 
   7121 /*
   7122  * wm_stop:		[ifnet interface function]
   7123  *
   7124  *	Stop transmission on the interface.
   7125  */
   7126 static void
   7127 wm_stop(struct ifnet *ifp, int disable)
   7128 {
   7129 	struct wm_softc *sc = ifp->if_softc;
   7130 
   7131 	ASSERT_SLEEPABLE();
   7132 	KASSERT(IFNET_LOCKED(ifp));
   7133 
   7134 	mutex_enter(sc->sc_core_lock);
   7135 	wm_stop_locked(ifp, disable ? true : false, true);
   7136 	mutex_exit(sc->sc_core_lock);
   7137 
   7138 	/*
   7139 	 * After wm_set_stopping_flags(), it is guaranteed that
   7140 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7141 	 * However, workqueue_wait() cannot be called in
    7142 	 * wm_stop_locked() because it can sleep,
    7143 	 * so call workqueue_wait() here.
   7144 	 */
   7145 	for (int i = 0; i < sc->sc_nqueues; i++)
   7146 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7147 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7148 }
   7149 
   7150 static void
   7151 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7152 {
   7153 	struct wm_softc *sc = ifp->if_softc;
   7154 	struct wm_txsoft *txs;
   7155 	int i, qidx;
   7156 
   7157 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7158 		device_xname(sc->sc_dev), __func__));
   7159 	KASSERT(IFNET_LOCKED(ifp));
   7160 	KASSERT(mutex_owned(sc->sc_core_lock));
   7161 
   7162 	wm_set_stopping_flags(sc);
   7163 
   7164 	if (sc->sc_flags & WM_F_HAS_MII) {
   7165 		/* Down the MII. */
   7166 		mii_down(&sc->sc_mii);
   7167 	} else {
   7168 #if 0
   7169 		/* Should we clear PHY's status properly? */
   7170 		wm_reset(sc);
   7171 #endif
   7172 	}
   7173 
   7174 	/* Stop the transmit and receive processes. */
   7175 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7176 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7177 	sc->sc_rctl &= ~RCTL_EN;
   7178 
   7179 	/*
   7180 	 * Clear the interrupt mask to ensure the device cannot assert its
   7181 	 * interrupt line.
   7182 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7183 	 * service any currently pending or shared interrupt.
   7184 	 */
   7185 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7186 	sc->sc_icr = 0;
   7187 	if (wm_is_using_msix(sc)) {
   7188 		if (sc->sc_type != WM_T_82574) {
   7189 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7190 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7191 		} else
   7192 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7193 	}
   7194 
   7195 	/*
   7196 	 * Stop callouts after interrupts are disabled; if we have
   7197 	 * to wait for them, we will be releasing the CORE_LOCK
   7198 	 * briefly, which will unblock interrupts on the current CPU.
   7199 	 */
   7200 
   7201 	/* Stop the one second clock. */
   7202 	if (wait)
   7203 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7204 	else
   7205 		callout_stop(&sc->sc_tick_ch);
   7206 
   7207 	/* Stop the 82547 Tx FIFO stall check timer. */
   7208 	if (sc->sc_type == WM_T_82547) {
   7209 		if (wait)
   7210 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7211 		else
   7212 			callout_stop(&sc->sc_txfifo_ch);
   7213 	}
   7214 
   7215 	/* Release any queued transmit buffers. */
   7216 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7217 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7218 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7219 		struct mbuf *m;
   7220 
   7221 		mutex_enter(txq->txq_lock);
   7222 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7223 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7224 			txs = &txq->txq_soft[i];
   7225 			if (txs->txs_mbuf != NULL) {
   7226 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7227 				m_freem(txs->txs_mbuf);
   7228 				txs->txs_mbuf = NULL;
   7229 			}
   7230 		}
   7231 		/* Drain txq_interq */
   7232 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7233 			m_freem(m);
   7234 		mutex_exit(txq->txq_lock);
   7235 	}
   7236 
   7237 	/* Mark the interface as down and cancel the watchdog timer. */
   7238 	ifp->if_flags &= ~IFF_RUNNING;
   7239 	sc->sc_if_flags = ifp->if_flags;
   7240 
   7241 	if (disable) {
   7242 		for (i = 0; i < sc->sc_nqueues; i++) {
   7243 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7244 			mutex_enter(rxq->rxq_lock);
   7245 			wm_rxdrain(rxq);
   7246 			mutex_exit(rxq->rxq_lock);
   7247 		}
   7248 	}
   7249 
   7250 #if 0 /* notyet */
   7251 	if (sc->sc_type >= WM_T_82544)
   7252 		CSR_WRITE(sc, WMREG_WUC, 0);
   7253 #endif
   7254 }
   7255 
   7256 static void
   7257 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7258 {
   7259 	struct mbuf *m;
   7260 	int i;
   7261 
   7262 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7263 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7264 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7265 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7266 		    m->m_data, m->m_len, m->m_flags);
   7267 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7268 	    i, i == 1 ? "" : "s");
   7269 }
   7270 
   7271 /*
   7272  * wm_82547_txfifo_stall:
   7273  *
   7274  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7275  *	reset the FIFO pointers, and restart packet transmission.
   7276  */
   7277 static void
   7278 wm_82547_txfifo_stall(void *arg)
   7279 {
   7280 	struct wm_softc *sc = arg;
   7281 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7282 
   7283 	mutex_enter(txq->txq_lock);
   7284 
   7285 	if (txq->txq_stopping)
   7286 		goto out;
   7287 
   7288 	if (txq->txq_fifo_stall) {
   7289 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7290 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7291 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7292 			/*
   7293 			 * Packets have drained.  Stop transmitter, reset
   7294 			 * FIFO pointers, restart transmitter, and kick
   7295 			 * the packet queue.
   7296 			 */
   7297 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7298 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7299 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7300 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7301 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7302 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7303 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7304 			CSR_WRITE_FLUSH(sc);
   7305 
   7306 			txq->txq_fifo_head = 0;
   7307 			txq->txq_fifo_stall = 0;
   7308 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7309 		} else {
   7310 			/*
   7311 			 * Still waiting for packets to drain; try again in
   7312 			 * another tick.
   7313 			 */
   7314 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7315 		}
   7316 	}
   7317 
   7318 out:
   7319 	mutex_exit(txq->txq_lock);
   7320 }
   7321 
   7322 /*
   7323  * wm_82547_txfifo_bugchk:
   7324  *
   7325  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7326  *	prevent enqueueing a packet that would wrap around the end
    7327  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   7328  *
   7329  *	We do this by checking the amount of space before the end
   7330  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7331  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7332  *	the internal FIFO pointers to the beginning, and restart
   7333  *	transmission on the interface.
   7334  */
   7335 #define	WM_FIFO_HDR		0x10
   7336 #define	WM_82547_PAD_LEN	0x3e0
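         /*
          * Worked example of the check below (derived from the code, not from
          * an erratum document): a 1514-byte frame rounds up to
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes, and
          * the transmitter is stalled when the space left before the FIFO end
          * is at most len - WM_82547_PAD_LEN = 1536 - 992 = 544 bytes.
          */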
   7337 static int
   7338 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7339 {
   7340 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7341 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7342 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7343 
   7344 	/* Just return if already stalled. */
   7345 	if (txq->txq_fifo_stall)
   7346 		return 1;
   7347 
   7348 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7349 		/* Stall only occurs in half-duplex mode. */
   7350 		goto send_packet;
   7351 	}
   7352 
   7353 	if (len >= WM_82547_PAD_LEN + space) {
   7354 		txq->txq_fifo_stall = 1;
   7355 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7356 		return 1;
   7357 	}
   7358 
   7359 send_packet:
   7360 	txq->txq_fifo_head += len;
   7361 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7362 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7363 
   7364 	return 0;
   7365 }
   7366 
   7367 static int
   7368 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7369 {
   7370 	int error;
   7371 
   7372 	/*
   7373 	 * Allocate the control data structures, and create and load the
   7374 	 * DMA map for it.
   7375 	 *
   7376 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7377 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7378 	 * both sets within the same 4G segment.
   7379 	 */
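         	/*
         	 * The 4G constraint above is enforced by passing 0x100000000 as
         	 * the boundary argument to bus_dmamem_alloc() below, so the
         	 * allocated memory never crosses a 4GB boundary.
         	 */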
   7380 	if (sc->sc_type < WM_T_82544)
   7381 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7382 	else
   7383 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7384 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7385 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7386 	else
   7387 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7388 
   7389 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7390 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7391 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7392 		aprint_error_dev(sc->sc_dev,
   7393 		    "unable to allocate TX control data, error = %d\n",
   7394 		    error);
   7395 		goto fail_0;
   7396 	}
   7397 
   7398 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7399 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7400 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7401 		aprint_error_dev(sc->sc_dev,
   7402 		    "unable to map TX control data, error = %d\n", error);
   7403 		goto fail_1;
   7404 	}
   7405 
   7406 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7407 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7408 		aprint_error_dev(sc->sc_dev,
   7409 		    "unable to create TX control data DMA map, error = %d\n",
   7410 		    error);
   7411 		goto fail_2;
   7412 	}
   7413 
   7414 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7415 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7416 		aprint_error_dev(sc->sc_dev,
   7417 		    "unable to load TX control data DMA map, error = %d\n",
   7418 		    error);
   7419 		goto fail_3;
   7420 	}
   7421 
   7422 	return 0;
   7423 
   7424 fail_3:
   7425 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7426 fail_2:
   7427 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7428 	    WM_TXDESCS_SIZE(txq));
   7429 fail_1:
   7430 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7431 fail_0:
   7432 	return error;
   7433 }
   7434 
   7435 static void
   7436 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7437 {
   7438 
   7439 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7440 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7441 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7442 	    WM_TXDESCS_SIZE(txq));
   7443 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7444 }
   7445 
   7446 static int
   7447 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7448 {
   7449 	int error;
   7450 	size_t rxq_descs_size;
   7451 
   7452 	/*
   7453 	 * Allocate the control data structures, and create and load the
   7454 	 * DMA map for it.
   7455 	 *
   7456 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7457 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7458 	 * both sets within the same 4G segment.
   7459 	 */
   7460 	rxq->rxq_ndesc = WM_NRXDESC;
   7461 	if (sc->sc_type == WM_T_82574)
   7462 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7463 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7464 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7465 	else
   7466 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7467 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7468 
   7469 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7470 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7471 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7472 		aprint_error_dev(sc->sc_dev,
   7473 		    "unable to allocate RX control data, error = %d\n",
   7474 		    error);
   7475 		goto fail_0;
   7476 	}
   7477 
   7478 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7479 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7480 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7481 		aprint_error_dev(sc->sc_dev,
   7482 		    "unable to map RX control data, error = %d\n", error);
   7483 		goto fail_1;
   7484 	}
   7485 
   7486 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7487 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7488 		aprint_error_dev(sc->sc_dev,
   7489 		    "unable to create RX control data DMA map, error = %d\n",
   7490 		    error);
   7491 		goto fail_2;
   7492 	}
   7493 
   7494 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7495 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7496 		aprint_error_dev(sc->sc_dev,
   7497 		    "unable to load RX control data DMA map, error = %d\n",
   7498 		    error);
   7499 		goto fail_3;
   7500 	}
   7501 
   7502 	return 0;
   7503 
   7504  fail_3:
   7505 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7506  fail_2:
   7507 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7508 	    rxq_descs_size);
   7509  fail_1:
   7510 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7511  fail_0:
   7512 	return error;
   7513 }
   7514 
   7515 static void
   7516 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7517 {
   7518 
   7519 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7520 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7521 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7522 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7523 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7524 }
   7525 
   7526 
   7527 static int
   7528 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7529 {
   7530 	int i, error;
   7531 
   7532 	/* Create the transmit buffer DMA maps. */
   7533 	WM_TXQUEUELEN(txq) =
   7534 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7535 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7536 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7537 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7538 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7539 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7540 			aprint_error_dev(sc->sc_dev,
   7541 			    "unable to create Tx DMA map %d, error = %d\n",
   7542 			    i, error);
   7543 			goto fail;
   7544 		}
   7545 	}
   7546 
   7547 	return 0;
   7548 
   7549 fail:
   7550 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7551 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7552 			bus_dmamap_destroy(sc->sc_dmat,
   7553 			    txq->txq_soft[i].txs_dmamap);
   7554 	}
   7555 	return error;
   7556 }
   7557 
   7558 static void
   7559 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7560 {
   7561 	int i;
   7562 
   7563 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7564 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7565 			bus_dmamap_destroy(sc->sc_dmat,
   7566 			    txq->txq_soft[i].txs_dmamap);
   7567 	}
   7568 }
   7569 
   7570 static int
   7571 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7572 {
   7573 	int i, error;
   7574 
   7575 	/* Create the receive buffer DMA maps. */
   7576 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7577 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7578 			    MCLBYTES, 0, 0,
   7579 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7580 			aprint_error_dev(sc->sc_dev,
   7581 			    "unable to create Rx DMA map %d error = %d\n",
   7582 			    i, error);
   7583 			goto fail;
   7584 		}
   7585 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7586 	}
   7587 
   7588 	return 0;
   7589 
   7590  fail:
   7591 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7592 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7593 			bus_dmamap_destroy(sc->sc_dmat,
   7594 			    rxq->rxq_soft[i].rxs_dmamap);
   7595 	}
   7596 	return error;
   7597 }
   7598 
   7599 static void
   7600 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7601 {
   7602 	int i;
   7603 
   7604 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7605 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7606 			bus_dmamap_destroy(sc->sc_dmat,
   7607 			    rxq->rxq_soft[i].rxs_dmamap);
   7608 	}
   7609 }
   7610 
   7611 /*
    7612  * wm_alloc_txrx_queues:
   7613  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7614  */
   7615 static int
   7616 wm_alloc_txrx_queues(struct wm_softc *sc)
   7617 {
   7618 	int i, error, tx_done, rx_done;
   7619 
   7620 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7621 	    KM_SLEEP);
   7622 	if (sc->sc_queue == NULL) {
    7623 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7624 		error = ENOMEM;
   7625 		goto fail_0;
   7626 	}
   7627 
   7628 	/* For transmission */
   7629 	error = 0;
   7630 	tx_done = 0;
   7631 	for (i = 0; i < sc->sc_nqueues; i++) {
   7632 #ifdef WM_EVENT_COUNTERS
   7633 		int j;
   7634 		const char *xname;
   7635 #endif
   7636 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7637 		txq->txq_sc = sc;
   7638 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7639 
   7640 		error = wm_alloc_tx_descs(sc, txq);
   7641 		if (error)
   7642 			break;
   7643 		error = wm_alloc_tx_buffer(sc, txq);
   7644 		if (error) {
   7645 			wm_free_tx_descs(sc, txq);
   7646 			break;
   7647 		}
   7648 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7649 		if (txq->txq_interq == NULL) {
   7650 			wm_free_tx_descs(sc, txq);
   7651 			wm_free_tx_buffer(sc, txq);
   7652 			error = ENOMEM;
   7653 			break;
   7654 		}
   7655 
   7656 #ifdef WM_EVENT_COUNTERS
   7657 		xname = device_xname(sc->sc_dev);
   7658 
   7659 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7660 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7661 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7662 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7663 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7664 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7665 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7666 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7667 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7668 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7669 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7670 
   7671 		for (j = 0; j < WM_NTXSEGS; j++) {
   7672 			snprintf(txq->txq_txseg_evcnt_names[j],
   7673 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7674 			    "txq%02dtxseg%d", i, j);
   7675 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7676 			    EVCNT_TYPE_MISC,
   7677 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7678 		}
   7679 
   7680 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7681 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7682 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7683 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7684 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7685 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7686 #endif /* WM_EVENT_COUNTERS */
   7687 
   7688 		tx_done++;
   7689 	}
   7690 	if (error)
   7691 		goto fail_1;
   7692 
   7693 	/* For receive */
   7694 	error = 0;
   7695 	rx_done = 0;
   7696 	for (i = 0; i < sc->sc_nqueues; i++) {
   7697 #ifdef WM_EVENT_COUNTERS
   7698 		const char *xname;
   7699 #endif
   7700 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7701 		rxq->rxq_sc = sc;
   7702 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7703 
   7704 		error = wm_alloc_rx_descs(sc, rxq);
   7705 		if (error)
   7706 			break;
   7707 
   7708 		error = wm_alloc_rx_buffer(sc, rxq);
   7709 		if (error) {
   7710 			wm_free_rx_descs(sc, rxq);
   7711 			break;
   7712 		}
   7713 
   7714 #ifdef WM_EVENT_COUNTERS
   7715 		xname = device_xname(sc->sc_dev);
   7716 
   7717 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7718 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7719 
   7720 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7721 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7722 #endif /* WM_EVENT_COUNTERS */
   7723 
   7724 		rx_done++;
   7725 	}
   7726 	if (error)
   7727 		goto fail_2;
   7728 
   7729 	return 0;
   7730 
   7731 fail_2:
   7732 	for (i = 0; i < rx_done; i++) {
   7733 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7734 		wm_free_rx_buffer(sc, rxq);
   7735 		wm_free_rx_descs(sc, rxq);
   7736 		if (rxq->rxq_lock)
   7737 			mutex_obj_free(rxq->rxq_lock);
   7738 	}
   7739 fail_1:
   7740 	for (i = 0; i < tx_done; i++) {
   7741 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7742 		pcq_destroy(txq->txq_interq);
   7743 		wm_free_tx_buffer(sc, txq);
   7744 		wm_free_tx_descs(sc, txq);
   7745 		if (txq->txq_lock)
   7746 			mutex_obj_free(txq->txq_lock);
   7747 	}
   7748 
   7749 	kmem_free(sc->sc_queue,
   7750 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7751 fail_0:
   7752 	return error;
   7753 }
   7754 
   7755 /*
    7756  * wm_free_txrx_queues:
   7757  *	Free {tx,rx}descs and {tx,rx} buffers
   7758  */
   7759 static void
   7760 wm_free_txrx_queues(struct wm_softc *sc)
   7761 {
   7762 	int i;
   7763 
   7764 	for (i = 0; i < sc->sc_nqueues; i++) {
   7765 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7766 
   7767 #ifdef WM_EVENT_COUNTERS
   7768 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7769 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7770 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7771 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7772 #endif /* WM_EVENT_COUNTERS */
   7773 
   7774 		wm_free_rx_buffer(sc, rxq);
   7775 		wm_free_rx_descs(sc, rxq);
   7776 		if (rxq->rxq_lock)
   7777 			mutex_obj_free(rxq->rxq_lock);
   7778 	}
   7779 
   7780 	for (i = 0; i < sc->sc_nqueues; i++) {
   7781 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7782 		struct mbuf *m;
   7783 #ifdef WM_EVENT_COUNTERS
   7784 		int j;
   7785 
   7786 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7787 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7788 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7789 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7790 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7791 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7792 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7793 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7794 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7795 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7796 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7797 
   7798 		for (j = 0; j < WM_NTXSEGS; j++)
   7799 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7800 
   7801 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7802 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7803 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7804 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7805 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7806 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7807 #endif /* WM_EVENT_COUNTERS */
   7808 
   7809 		/* Drain txq_interq */
   7810 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7811 			m_freem(m);
   7812 		pcq_destroy(txq->txq_interq);
   7813 
   7814 		wm_free_tx_buffer(sc, txq);
   7815 		wm_free_tx_descs(sc, txq);
   7816 		if (txq->txq_lock)
   7817 			mutex_obj_free(txq->txq_lock);
   7818 	}
   7819 
   7820 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7821 }
   7822 
   7823 static void
   7824 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7825 {
   7826 
   7827 	KASSERT(mutex_owned(txq->txq_lock));
   7828 
   7829 	/* Initialize the transmit descriptor ring. */
   7830 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7831 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7832 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7833 	txq->txq_free = WM_NTXDESC(txq);
   7834 	txq->txq_next = 0;
   7835 }
   7836 
   7837 static void
   7838 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7839     struct wm_txqueue *txq)
   7840 {
   7841 
   7842 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7843 		device_xname(sc->sc_dev), __func__));
   7844 	KASSERT(mutex_owned(txq->txq_lock));
   7845 
   7846 	if (sc->sc_type < WM_T_82543) {
   7847 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7848 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7849 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7850 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7851 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7852 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7853 	} else {
   7854 		int qid = wmq->wmq_id;
   7855 
   7856 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7857 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7858 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7859 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7860 
   7861 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7862 			/*
   7863 			 * Don't write TDT before TCTL.EN is set.
   7864 			 * See the document.
   7865 			 */
   7866 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7867 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7868 			    | TXDCTL_WTHRESH(0));
   7869 		else {
   7870 			/* XXX should update with AIM? */
   7871 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7872 			if (sc->sc_type >= WM_T_82540) {
    7873 				/* Should be the same value as TIDV */
   7874 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7875 			}
   7876 
   7877 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7878 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7879 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7880 		}
   7881 	}
   7882 }
   7883 
   7884 static void
   7885 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7886 {
   7887 	int i;
   7888 
   7889 	KASSERT(mutex_owned(txq->txq_lock));
   7890 
   7891 	/* Initialize the transmit job descriptors. */
   7892 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7893 		txq->txq_soft[i].txs_mbuf = NULL;
   7894 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7895 	txq->txq_snext = 0;
   7896 	txq->txq_sdirty = 0;
   7897 }
   7898 
   7899 static void
   7900 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7901     struct wm_txqueue *txq)
   7902 {
   7903 
   7904 	KASSERT(mutex_owned(txq->txq_lock));
   7905 
   7906 	/*
   7907 	 * Set up some register offsets that are different between
   7908 	 * the i82542 and the i82543 and later chips.
   7909 	 */
   7910 	if (sc->sc_type < WM_T_82543)
   7911 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7912 	else
   7913 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7914 
   7915 	wm_init_tx_descs(sc, txq);
   7916 	wm_init_tx_regs(sc, wmq, txq);
   7917 	wm_init_tx_buffer(sc, txq);
   7918 
    7919 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   7920 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7921 
   7922 	txq->txq_sending = false;
   7923 }
   7924 
   7925 static void
   7926 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7927     struct wm_rxqueue *rxq)
   7928 {
   7929 
   7930 	KASSERT(mutex_owned(rxq->rxq_lock));
   7931 
   7932 	/*
   7933 	 * Initialize the receive descriptor and receive job
   7934 	 * descriptor rings.
   7935 	 */
   7936 	if (sc->sc_type < WM_T_82543) {
   7937 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7938 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7939 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7940 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7941 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7942 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7943 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7944 
   7945 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7946 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7947 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7948 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7949 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7950 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7951 	} else {
   7952 		int qid = wmq->wmq_id;
   7953 
   7954 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7955 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7956 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7957 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7958 
   7959 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7960 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7961 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7962 				    "or higher\n", __func__, MCLBYTES);
   7963 
   7964 			/*
   7965 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7966 			 * only.
   7967 			 */
   7968 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7969 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7970 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7971 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7972 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7973 			    | RXDCTL_WTHRESH(1));
   7974 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7975 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7976 		} else {
   7977 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7978 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7979 			/* XXX should update with AIM? */
   7980 			CSR_WRITE(sc, WMREG_RDTR,
   7981 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7982 			/* MUST be the same value as RDTR */
   7983 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7984 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7985 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7986 		}
   7987 	}
   7988 }
   7989 
   7990 static int
   7991 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7992 {
   7993 	struct wm_rxsoft *rxs;
   7994 	int error, i;
   7995 
   7996 	KASSERT(mutex_owned(rxq->rxq_lock));
   7997 
   7998 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7999 		rxs = &rxq->rxq_soft[i];
   8000 		if (rxs->rxs_mbuf == NULL) {
   8001 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8002 				log(LOG_ERR, "%s: unable to allocate or map "
   8003 				    "rx buffer %d, error = %d\n",
   8004 				    device_xname(sc->sc_dev), i, error);
   8005 				/*
   8006 				 * XXX Should attempt to run with fewer receive
   8007 				 * XXX buffers instead of just failing.
   8008 				 */
   8009 				wm_rxdrain(rxq);
   8010 				return ENOMEM;
   8011 			}
   8012 		} else {
   8013 			/*
   8014 			 * For 82575 and 82576, the RX descriptors must be
   8015 			 * initialized after the setting of RCTL.EN in
   8016 			 * wm_set_filter()
   8017 			 */
   8018 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8019 				wm_init_rxdesc(rxq, i);
   8020 		}
   8021 	}
   8022 	rxq->rxq_ptr = 0;
   8023 	rxq->rxq_discard = 0;
   8024 	WM_RXCHAIN_RESET(rxq);
   8025 
   8026 	return 0;
   8027 }
   8028 
   8029 static int
   8030 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8031     struct wm_rxqueue *rxq)
   8032 {
   8033 
   8034 	KASSERT(mutex_owned(rxq->rxq_lock));
   8035 
   8036 	/*
   8037 	 * Set up some register offsets that are different between
   8038 	 * the i82542 and the i82543 and later chips.
   8039 	 */
   8040 	if (sc->sc_type < WM_T_82543)
   8041 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8042 	else
   8043 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8044 
   8045 	wm_init_rx_regs(sc, wmq, rxq);
   8046 	return wm_init_rx_buffer(sc, rxq);
   8047 }
   8048 
   8049 /*
    8050  * wm_init_txrx_queues:
   8051  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8052  */
   8053 static int
   8054 wm_init_txrx_queues(struct wm_softc *sc)
   8055 {
   8056 	int i, error = 0;
   8057 
   8058 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8059 		device_xname(sc->sc_dev), __func__));
   8060 
   8061 	for (i = 0; i < sc->sc_nqueues; i++) {
   8062 		struct wm_queue *wmq = &sc->sc_queue[i];
   8063 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8064 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8065 
   8066 		/*
   8067 		 * TODO
    8068 		 * Currently, a constant value is used instead of AIM.
    8069 		 * Furthermore, the interrupt interval for multiqueue, which
    8070 		 * uses polling mode, is less than the default value.
   8071 		 * More tuning and AIM are required.
   8072 		 */
   8073 		if (wm_is_using_multiqueue(sc))
   8074 			wmq->wmq_itr = 50;
   8075 		else
   8076 			wmq->wmq_itr = sc->sc_itr_init;
   8077 		wmq->wmq_set_itr = true;
   8078 
   8079 		mutex_enter(txq->txq_lock);
   8080 		wm_init_tx_queue(sc, wmq, txq);
   8081 		mutex_exit(txq->txq_lock);
   8082 
   8083 		mutex_enter(rxq->rxq_lock);
   8084 		error = wm_init_rx_queue(sc, wmq, rxq);
   8085 		mutex_exit(rxq->rxq_lock);
   8086 		if (error)
   8087 			break;
   8088 	}
   8089 
   8090 	return error;
   8091 }
   8092 
   8093 /*
   8094  * wm_tx_offload:
   8095  *
   8096  *	Set up TCP/IP checksumming parameters for the
   8097  *	specified packet.
   8098  */
   8099 static void
   8100 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8101     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8102 {
   8103 	struct mbuf *m0 = txs->txs_mbuf;
   8104 	struct livengood_tcpip_ctxdesc *t;
   8105 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8106 	uint32_t ipcse;
   8107 	struct ether_header *eh;
   8108 	int offset, iphl;
   8109 	uint8_t fields;
   8110 
   8111 	/*
   8112 	 * XXX It would be nice if the mbuf pkthdr had offset
   8113 	 * fields for the protocol headers.
   8114 	 */
   8115 
   8116 	eh = mtod(m0, struct ether_header *);
   8117 	switch (htons(eh->ether_type)) {
   8118 	case ETHERTYPE_IP:
   8119 	case ETHERTYPE_IPV6:
   8120 		offset = ETHER_HDR_LEN;
   8121 		break;
   8122 
   8123 	case ETHERTYPE_VLAN:
   8124 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8125 		break;
   8126 
   8127 	default:
   8128 		/* Don't support this protocol or encapsulation. */
   8129 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8130 		txq->txq_last_hw_ipcs = 0;
   8131 		txq->txq_last_hw_tucs = 0;
   8132 		*fieldsp = 0;
   8133 		*cmdp = 0;
   8134 		return;
   8135 	}
   8136 
   8137 	if ((m0->m_pkthdr.csum_flags &
   8138 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8139 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8140 	} else
   8141 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8142 
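         	/*
         	 * IPCSE is the inclusive offset of the last byte covered by
         	 * the IP checksum; e.g. an untagged frame (offset 14) with a
         	 * 20-byte IPv4 header gives ipcse = 14 + 20 - 1 = 33.
         	 */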
   8143 	ipcse = offset + iphl - 1;
   8144 
   8145 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8146 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8147 	seg = 0;
   8148 	fields = 0;
   8149 
   8150 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8151 		int hlen = offset + iphl;
   8152 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8153 
   8154 		if (__predict_false(m0->m_len <
   8155 				    (hlen + sizeof(struct tcphdr)))) {
   8156 			/*
   8157 			 * TCP/IP headers are not in the first mbuf; we need
   8158 			 * to do this the slow and painful way. Let's just
   8159 			 * hope this doesn't happen very often.
   8160 			 */
   8161 			struct tcphdr th;
   8162 
   8163 			WM_Q_EVCNT_INCR(txq, tsopain);
   8164 
   8165 			m_copydata(m0, hlen, sizeof(th), &th);
   8166 			if (v4) {
   8167 				struct ip ip;
   8168 
   8169 				m_copydata(m0, offset, sizeof(ip), &ip);
   8170 				ip.ip_len = 0;
   8171 				m_copyback(m0,
   8172 				    offset + offsetof(struct ip, ip_len),
   8173 				    sizeof(ip.ip_len), &ip.ip_len);
   8174 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8175 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8176 			} else {
   8177 				struct ip6_hdr ip6;
   8178 
   8179 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8180 				ip6.ip6_plen = 0;
   8181 				m_copyback(m0,
   8182 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8183 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8184 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8185 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8186 			}
   8187 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8188 			    sizeof(th.th_sum), &th.th_sum);
   8189 
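         			/*
         			 * th_off counts 32-bit words, so "<< 2"
         			 * converts it to bytes.
         			 */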
   8190 			hlen += th.th_off << 2;
   8191 		} else {
   8192 			/*
   8193 			 * TCP/IP headers are in the first mbuf; we can do
   8194 			 * this the easy way.
   8195 			 */
   8196 			struct tcphdr *th;
   8197 
   8198 			if (v4) {
   8199 				struct ip *ip =
   8200 				    (void *)(mtod(m0, char *) + offset);
   8201 				th = (void *)(mtod(m0, char *) + hlen);
   8202 
   8203 				ip->ip_len = 0;
   8204 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8205 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8206 			} else {
   8207 				struct ip6_hdr *ip6 =
   8208 				    (void *)(mtod(m0, char *) + offset);
   8209 				th = (void *)(mtod(m0, char *) + hlen);
   8210 
   8211 				ip6->ip6_plen = 0;
   8212 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8213 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8214 			}
   8215 			hlen += th->th_off << 2;
   8216 		}
   8217 
   8218 		if (v4) {
   8219 			WM_Q_EVCNT_INCR(txq, tso);
   8220 			cmdlen |= WTX_TCPIP_CMD_IP;
   8221 		} else {
   8222 			WM_Q_EVCNT_INCR(txq, tso6);
   8223 			ipcse = 0;
   8224 		}
   8225 		cmd |= WTX_TCPIP_CMD_TSE;
   8226 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8227 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8228 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8229 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8230 	}
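         	/*
         	 * For TSO, "seg" now carries the total header length
         	 * (Ethernet + IP + TCP) and the MSS; the hardware replicates
         	 * the headers and slices the payload into MSS-sized segments.
         	 */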
   8231 
   8232 	/*
   8233 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8234 	 * offload feature, if we load the context descriptor, we
   8235 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8236 	 */
   8237 
   8238 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8239 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8240 	    WTX_TCPIP_IPCSE(ipcse);
   8241 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8242 		WM_Q_EVCNT_INCR(txq, ipsum);
   8243 		fields |= WTX_IXSM;
   8244 	}
   8245 
   8246 	offset += iphl;
   8247 
   8248 	if (m0->m_pkthdr.csum_flags &
   8249 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8250 		WM_Q_EVCNT_INCR(txq, tusum);
   8251 		fields |= WTX_TXSM;
   8252 		tucs = WTX_TCPIP_TUCSS(offset) |
   8253 		    WTX_TCPIP_TUCSO(offset +
   8254 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8255 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8256 	} else if ((m0->m_pkthdr.csum_flags &
   8257 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8258 		WM_Q_EVCNT_INCR(txq, tusum6);
   8259 		fields |= WTX_TXSM;
   8260 		tucs = WTX_TCPIP_TUCSS(offset) |
   8261 		    WTX_TCPIP_TUCSO(offset +
   8262 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8263 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8264 	} else {
   8265 		/* Just initialize it to a valid TCP context. */
   8266 		tucs = WTX_TCPIP_TUCSS(offset) |
   8267 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8268 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8269 	}
   8270 
   8271 	*cmdp = cmd;
   8272 	*fieldsp = fields;
   8273 
   8274 	/*
    8275 	 * We don't have to write a context descriptor for every packet,
    8276 	 * except on the 82574.  On the 82574, we must write a context
    8277 	 * descriptor for every packet when we use two descriptor queues.
    8278 	 *
    8279 	 * The 82574L can only remember the *last* context used,
    8280 	 * regardless of the queue it was used for.  We cannot reuse
   8281 	 * contexts on this hardware platform and must generate a new
   8282 	 * context every time.  82574L hardware spec, section 7.2.6,
   8283 	 * second note.
   8284 	 */
   8285 	if (sc->sc_nqueues < 2) {
   8286 		/*
    8287 		 * Setting up a new checksum offload context for every
    8288 		 * frame takes a lot of processing time for the hardware.
    8289 		 * This also reduces performance a lot for small frames,
    8290 		 * so avoid it if the driver can reuse a previously
    8291 		 * configured checksum offload context.
    8292 		 * For TSO, in theory we could reuse the same TSO context
    8293 		 * only if the frame is the same type (IP/TCP) and has the
    8294 		 * same MSS.  However, checking whether a frame has the same
    8295 		 * IP/TCP structure is hard, so just ignore that and always
    8296 		 * establish a new TSO context.
   8297 		 */
   8298 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8299 		    == 0) {
   8300 			if (txq->txq_last_hw_cmd == cmd &&
   8301 			    txq->txq_last_hw_fields == fields &&
   8302 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8303 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8304 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8305 				return;
   8306 			}
   8307 		}
   8308 
   8309 		txq->txq_last_hw_cmd = cmd;
   8310 		txq->txq_last_hw_fields = fields;
   8311 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8312 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8313 	}
   8314 
   8315 	/* Fill in the context descriptor. */
   8316 	t = (struct livengood_tcpip_ctxdesc *)
   8317 	    &txq->txq_descs[txq->txq_next];
   8318 	t->tcpip_ipcs = htole32(ipcs);
   8319 	t->tcpip_tucs = htole32(tucs);
   8320 	t->tcpip_cmdlen = htole32(cmdlen);
   8321 	t->tcpip_seg = htole32(seg);
   8322 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8323 
   8324 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8325 	txs->txs_ndesc++;
   8326 }
   8327 
   8328 static inline int
   8329 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8330 {
   8331 	struct wm_softc *sc = ifp->if_softc;
   8332 	u_int cpuid = cpu_index(curcpu());
   8333 
   8334 	/*
    8335 	 * Currently, a simple distribution strategy.
    8336 	 * TODO:
    8337 	 * distribute by flowid (RSS hash value).
   8338 	 */
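         	/*
         	 * Example: with ncpu = 8, sc_affinity_offset = 2 and
         	 * sc_nqueues = 4, a sender on CPU 1 maps to
         	 * ((1 + 8 - 2) % 8) % 4 = 3.  Adding ncpu first keeps the
         	 * unsigned arithmetic from wrapping when sc_affinity_offset
         	 * exceeds cpuid.
         	 */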
   8339 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8340 }
   8341 
   8342 static inline bool
   8343 wm_linkdown_discard(struct wm_txqueue *txq)
   8344 {
   8345 
   8346 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8347 		return true;
   8348 
   8349 	return false;
   8350 }
   8351 
   8352 /*
   8353  * wm_start:		[ifnet interface function]
   8354  *
   8355  *	Start packet transmission on the interface.
   8356  */
   8357 static void
   8358 wm_start(struct ifnet *ifp)
   8359 {
   8360 	struct wm_softc *sc = ifp->if_softc;
   8361 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8362 
   8363 	KASSERT(if_is_mpsafe(ifp));
   8364 	/*
   8365 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8366 	 */
   8367 
   8368 	mutex_enter(txq->txq_lock);
   8369 	if (!txq->txq_stopping)
   8370 		wm_start_locked(ifp);
   8371 	mutex_exit(txq->txq_lock);
   8372 }
   8373 
   8374 static void
   8375 wm_start_locked(struct ifnet *ifp)
   8376 {
   8377 	struct wm_softc *sc = ifp->if_softc;
   8378 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8379 
   8380 	wm_send_common_locked(ifp, txq, false);
   8381 }
   8382 
   8383 static int
   8384 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8385 {
   8386 	int qid;
   8387 	struct wm_softc *sc = ifp->if_softc;
   8388 	struct wm_txqueue *txq;
   8389 
   8390 	qid = wm_select_txqueue(ifp, m);
   8391 	txq = &sc->sc_queue[qid].wmq_txq;
   8392 
   8393 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8394 		m_freem(m);
   8395 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8396 		return ENOBUFS;
   8397 	}
   8398 
   8399 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8400 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8401 	if (m->m_flags & M_MCAST)
   8402 		if_statinc_ref(nsr, if_omcasts);
   8403 	IF_STAT_PUTREF(ifp);
   8404 
   8405 	if (mutex_tryenter(txq->txq_lock)) {
   8406 		if (!txq->txq_stopping)
   8407 			wm_transmit_locked(ifp, txq);
   8408 		mutex_exit(txq->txq_lock);
   8409 	}
   8410 
   8411 	return 0;
   8412 }
   8413 
   8414 static void
   8415 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8416 {
   8417 
   8418 	wm_send_common_locked(ifp, txq, true);
   8419 }
   8420 
   8421 static void
   8422 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8423     bool is_transmit)
   8424 {
   8425 	struct wm_softc *sc = ifp->if_softc;
   8426 	struct mbuf *m0;
   8427 	struct wm_txsoft *txs;
   8428 	bus_dmamap_t dmamap;
   8429 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8430 	bus_addr_t curaddr;
   8431 	bus_size_t seglen, curlen;
   8432 	uint32_t cksumcmd;
   8433 	uint8_t cksumfields;
   8434 	bool remap = true;
   8435 
   8436 	KASSERT(mutex_owned(txq->txq_lock));
   8437 	KASSERT(!txq->txq_stopping);
   8438 
   8439 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8440 		return;
   8441 
   8442 	if (__predict_false(wm_linkdown_discard(txq))) {
   8443 		do {
   8444 			if (is_transmit)
   8445 				m0 = pcq_get(txq->txq_interq);
   8446 			else
   8447 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8448 			/*
    8449 			 * Increment the successful packet counter even though
    8450 			 * the packet is discarded because the PHY link is down.
   8451 			 */
   8452 			if (m0 != NULL) {
   8453 				if_statinc(ifp, if_opackets);
   8454 				m_freem(m0);
   8455 			}
   8456 		} while (m0 != NULL);
   8457 		return;
   8458 	}
   8459 
   8460 	/* Remember the previous number of free descriptors. */
   8461 	ofree = txq->txq_free;
   8462 
   8463 	/*
   8464 	 * Loop through the send queue, setting up transmit descriptors
   8465 	 * until we drain the queue, or use up all available transmit
   8466 	 * descriptors.
   8467 	 */
   8468 	for (;;) {
   8469 		m0 = NULL;
   8470 
   8471 		/* Get a work queue entry. */
   8472 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8473 			wm_txeof(txq, UINT_MAX);
   8474 			if (txq->txq_sfree == 0) {
   8475 				DPRINTF(sc, WM_DEBUG_TX,
   8476 				    ("%s: TX: no free job descriptors\n",
   8477 					device_xname(sc->sc_dev)));
   8478 				WM_Q_EVCNT_INCR(txq, txsstall);
   8479 				break;
   8480 			}
   8481 		}
   8482 
   8483 		/* Grab a packet off the queue. */
   8484 		if (is_transmit)
   8485 			m0 = pcq_get(txq->txq_interq);
   8486 		else
   8487 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8488 		if (m0 == NULL)
   8489 			break;
   8490 
   8491 		DPRINTF(sc, WM_DEBUG_TX,
   8492 		    ("%s: TX: have packet to transmit: %p\n",
   8493 			device_xname(sc->sc_dev), m0));
   8494 
   8495 		txs = &txq->txq_soft[txq->txq_snext];
   8496 		dmamap = txs->txs_dmamap;
   8497 
   8498 		use_tso = (m0->m_pkthdr.csum_flags &
   8499 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8500 
   8501 		/*
   8502 		 * So says the Linux driver:
   8503 		 * The controller does a simple calculation to make sure
   8504 		 * there is enough room in the FIFO before initiating the
   8505 		 * DMA for each buffer. The calc is:
   8506 		 *	4 = ceil(buffer len / MSS)
   8507 		 * To make sure we don't overrun the FIFO, adjust the max
   8508 		 * buffer len if the MSS drops.
   8509 		 */
   8510 		dmamap->dm_maxsegsz =
   8511 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8512 		    ? m0->m_pkthdr.segsz << 2
   8513 		    : WTX_MAX_LEN;
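         		/*
         		 * E.g. with an MSS of 536 each DMA segment is capped at
         		 * 536 << 2 = 2144 bytes (assuming that is below
         		 * WTX_MAX_LEN), i.e. at most four MSS-sized chunks per
         		 * segment.
         		 */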
   8514 
   8515 		/*
   8516 		 * Load the DMA map.  If this fails, the packet either
   8517 		 * didn't fit in the allotted number of segments, or we
   8518 		 * were short on resources.  For the too-many-segments
   8519 		 * case, we simply report an error and drop the packet,
   8520 		 * since we can't sanely copy a jumbo packet to a single
   8521 		 * buffer.
   8522 		 */
   8523 retry:
   8524 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8525 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8526 		if (__predict_false(error)) {
   8527 			if (error == EFBIG) {
   8528 				if (remap == true) {
   8529 					struct mbuf *m;
   8530 
   8531 					remap = false;
   8532 					m = m_defrag(m0, M_NOWAIT);
   8533 					if (m != NULL) {
   8534 						WM_Q_EVCNT_INCR(txq, defrag);
   8535 						m0 = m;
   8536 						goto retry;
   8537 					}
   8538 				}
   8539 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8540 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8541 				    "DMA segments, dropping...\n",
   8542 				    device_xname(sc->sc_dev));
   8543 				wm_dump_mbuf_chain(sc, m0);
   8544 				m_freem(m0);
   8545 				continue;
   8546 			}
   8547 			/* Short on resources, just stop for now. */
   8548 			DPRINTF(sc, WM_DEBUG_TX,
   8549 			    ("%s: TX: dmamap load failed: %d\n",
   8550 				device_xname(sc->sc_dev), error));
   8551 			break;
   8552 		}
   8553 
   8554 		segs_needed = dmamap->dm_nsegs;
   8555 		if (use_tso) {
   8556 			/* For sentinel descriptor; see below. */
   8557 			segs_needed++;
   8558 		}
   8559 
   8560 		/*
   8561 		 * Ensure we have enough descriptors free to describe
   8562 		 * the packet. Note, we always reserve one descriptor
   8563 		 * at the end of the ring due to the semantics of the
   8564 		 * TDT register, plus one more in the event we need
   8565 		 * to load offload context.
   8566 		 */
   8567 		if (segs_needed > txq->txq_free - 2) {
   8568 			/*
   8569 			 * Not enough free descriptors to transmit this
   8570 			 * packet.  We haven't committed anything yet,
   8571 			 * so just unload the DMA map, put the packet
    8572 			 * back on the queue, and punt.  Notify the upper
   8573 			 * layer that there are no more slots left.
   8574 			 */
   8575 			DPRINTF(sc, WM_DEBUG_TX,
   8576 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8577 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8578 				segs_needed, txq->txq_free - 1));
   8579 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8580 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8581 			WM_Q_EVCNT_INCR(txq, txdstall);
   8582 			break;
   8583 		}
   8584 
   8585 		/*
   8586 		 * Check for 82547 Tx FIFO bug. We need to do this
   8587 		 * once we know we can transmit the packet, since we
   8588 		 * do some internal FIFO space accounting here.
   8589 		 */
   8590 		if (sc->sc_type == WM_T_82547 &&
   8591 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8592 			DPRINTF(sc, WM_DEBUG_TX,
   8593 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8594 				device_xname(sc->sc_dev)));
   8595 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8596 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8597 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8598 			break;
   8599 		}
   8600 
   8601 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8602 
   8603 		DPRINTF(sc, WM_DEBUG_TX,
   8604 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8605 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8606 
   8607 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8608 
   8609 		/*
   8610 		 * Store a pointer to the packet so that we can free it
   8611 		 * later.
   8612 		 *
   8613 		 * Initially, we consider the number of descriptors the
    8614 		 * packet uses to be the number of DMA segments.  This may be
   8615 		 * incremented by 1 if we do checksum offload (a descriptor
   8616 		 * is used to set the checksum context).
   8617 		 */
   8618 		txs->txs_mbuf = m0;
   8619 		txs->txs_firstdesc = txq->txq_next;
   8620 		txs->txs_ndesc = segs_needed;
   8621 
   8622 		/* Set up offload parameters for this packet. */
   8623 		if (m0->m_pkthdr.csum_flags &
   8624 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8625 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8626 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8627 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8628 		} else {
   8629 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8630 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8631 			cksumcmd = 0;
   8632 			cksumfields = 0;
   8633 		}
   8634 
   8635 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8636 
   8637 		/* Sync the DMA map. */
   8638 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8639 		    BUS_DMASYNC_PREWRITE);
   8640 
   8641 		/* Initialize the transmit descriptor. */
   8642 		for (nexttx = txq->txq_next, seg = 0;
   8643 		     seg < dmamap->dm_nsegs; seg++) {
   8644 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8645 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8646 			     seglen != 0;
   8647 			     curaddr += curlen, seglen -= curlen,
   8648 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8649 				curlen = seglen;
   8650 
   8651 				/*
   8652 				 * So says the Linux driver:
   8653 				 * Work around for premature descriptor
   8654 				 * write-backs in TSO mode.  Append a
   8655 				 * 4-byte sentinel descriptor.
   8656 				 */
   8657 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8658 				    curlen > 8)
   8659 					curlen -= 4;
   8660 
   8661 				wm_set_dma_addr(
   8662 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8663 				txq->txq_descs[nexttx].wtx_cmdlen
   8664 				    = htole32(cksumcmd | curlen);
   8665 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8666 				    = 0;
   8667 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8668 				    = cksumfields;
   8669 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8670 				lasttx = nexttx;
   8671 
   8672 				DPRINTF(sc, WM_DEBUG_TX,
   8673 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8674 					"len %#04zx\n",
   8675 					device_xname(sc->sc_dev), nexttx,
   8676 					(uint64_t)curaddr, curlen));
   8677 			}
   8678 		}
   8679 
   8680 		KASSERT(lasttx != -1);
   8681 
   8682 		/*
   8683 		 * Set up the command byte on the last descriptor of
   8684 		 * the packet. If we're in the interrupt delay window,
   8685 		 * delay the interrupt.
   8686 		 */
   8687 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8688 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8689 
   8690 		/*
   8691 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8692 		 * up the descriptor to encapsulate the packet for us.
   8693 		 *
   8694 		 * This is only valid on the last descriptor of the packet.
   8695 		 */
   8696 		if (vlan_has_tag(m0)) {
   8697 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8698 			    htole32(WTX_CMD_VLE);
   8699 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8700 			    = htole16(vlan_get_tag(m0));
   8701 		}
   8702 
   8703 		txs->txs_lastdesc = lasttx;
   8704 
   8705 		DPRINTF(sc, WM_DEBUG_TX,
   8706 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8707 			device_xname(sc->sc_dev),
   8708 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8709 
   8710 		/* Sync the descriptors we're using. */
   8711 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8712 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8713 
   8714 		/* Give the packet to the chip. */
   8715 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8716 
   8717 		DPRINTF(sc, WM_DEBUG_TX,
   8718 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8719 
   8720 		DPRINTF(sc, WM_DEBUG_TX,
   8721 		    ("%s: TX: finished transmitting packet, job %d\n",
   8722 			device_xname(sc->sc_dev), txq->txq_snext));
   8723 
   8724 		/* Advance the tx pointer. */
   8725 		txq->txq_free -= txs->txs_ndesc;
   8726 		txq->txq_next = nexttx;
   8727 
   8728 		txq->txq_sfree--;
   8729 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8730 
   8731 		/* Pass the packet to any BPF listeners. */
   8732 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8733 	}
   8734 
   8735 	if (m0 != NULL) {
   8736 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8737 		WM_Q_EVCNT_INCR(txq, descdrop);
   8738 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8739 			__func__));
   8740 		m_freem(m0);
   8741 	}
   8742 
   8743 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8744 		/* No more slots; notify upper layer. */
   8745 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8746 	}
   8747 
   8748 	if (txq->txq_free != ofree) {
   8749 		/* Set a watchdog timer in case the chip flakes out. */
   8750 		txq->txq_lastsent = time_uptime;
   8751 		txq->txq_sending = true;
   8752 	}
   8753 }
   8754 
   8755 /*
   8756  * wm_nq_tx_offload:
   8757  *
   8758  *	Set up TCP/IP checksumming parameters for the
   8759  *	specified packet, for NEWQUEUE devices
   8760  */
   8761 static void
   8762 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8763     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8764 {
   8765 	struct mbuf *m0 = txs->txs_mbuf;
   8766 	uint32_t vl_len, mssidx, cmdc;
   8767 	struct ether_header *eh;
   8768 	int offset, iphl;
   8769 
   8770 	/*
   8771 	 * XXX It would be nice if the mbuf pkthdr had offset
   8772 	 * fields for the protocol headers.
   8773 	 */
   8774 	*cmdlenp = 0;
   8775 	*fieldsp = 0;
   8776 
   8777 	eh = mtod(m0, struct ether_header *);
   8778 	switch (htons(eh->ether_type)) {
   8779 	case ETHERTYPE_IP:
   8780 	case ETHERTYPE_IPV6:
   8781 		offset = ETHER_HDR_LEN;
   8782 		break;
   8783 
   8784 	case ETHERTYPE_VLAN:
   8785 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8786 		break;
   8787 
   8788 	default:
   8789 		/* Don't support this protocol or encapsulation. */
   8790 		*do_csum = false;
   8791 		return;
   8792 	}
   8793 	*do_csum = true;
   8794 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8795 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8796 
   8797 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8798 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8799 
   8800 	if ((m0->m_pkthdr.csum_flags &
   8801 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8802 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8803 	} else {
   8804 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8805 	}
   8806 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8807 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8808 
   8809 	if (vlan_has_tag(m0)) {
   8810 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8811 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8812 		*cmdlenp |= NQTX_CMD_VLE;
   8813 	}
   8814 
   8815 	mssidx = 0;
   8816 
   8817 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8818 		int hlen = offset + iphl;
   8819 		int tcp_hlen;
   8820 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8821 
   8822 		if (__predict_false(m0->m_len <
   8823 				    (hlen + sizeof(struct tcphdr)))) {
   8824 			/*
   8825 			 * TCP/IP headers are not in the first mbuf; we need
   8826 			 * to do this the slow and painful way. Let's just
   8827 			 * hope this doesn't happen very often.
   8828 			 */
   8829 			struct tcphdr th;
   8830 
   8831 			WM_Q_EVCNT_INCR(txq, tsopain);
   8832 
   8833 			m_copydata(m0, hlen, sizeof(th), &th);
   8834 			if (v4) {
   8835 				struct ip ip;
   8836 
   8837 				m_copydata(m0, offset, sizeof(ip), &ip);
   8838 				ip.ip_len = 0;
   8839 				m_copyback(m0,
   8840 				    offset + offsetof(struct ip, ip_len),
   8841 				    sizeof(ip.ip_len), &ip.ip_len);
   8842 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8843 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8844 			} else {
   8845 				struct ip6_hdr ip6;
   8846 
   8847 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8848 				ip6.ip6_plen = 0;
   8849 				m_copyback(m0,
   8850 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8851 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8852 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8853 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8854 			}
   8855 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8856 			    sizeof(th.th_sum), &th.th_sum);
   8857 
   8858 			tcp_hlen = th.th_off << 2;
   8859 		} else {
   8860 			/*
   8861 			 * TCP/IP headers are in the first mbuf; we can do
   8862 			 * this the easy way.
   8863 			 */
   8864 			struct tcphdr *th;
   8865 
   8866 			if (v4) {
   8867 				struct ip *ip =
   8868 				    (void *)(mtod(m0, char *) + offset);
   8869 				th = (void *)(mtod(m0, char *) + hlen);
   8870 
   8871 				ip->ip_len = 0;
   8872 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8873 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8874 			} else {
   8875 				struct ip6_hdr *ip6 =
   8876 				    (void *)(mtod(m0, char *) + offset);
   8877 				th = (void *)(mtod(m0, char *) + hlen);
   8878 
   8879 				ip6->ip6_plen = 0;
   8880 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8881 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8882 			}
   8883 			tcp_hlen = th->th_off << 2;
   8884 		}
   8885 		hlen += tcp_hlen;
   8886 		*cmdlenp |= NQTX_CMD_TSE;
   8887 
   8888 		if (v4) {
   8889 			WM_Q_EVCNT_INCR(txq, tso);
   8890 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8891 		} else {
   8892 			WM_Q_EVCNT_INCR(txq, tso6);
   8893 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8894 		}
   8895 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8896 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8897 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8898 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8899 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8900 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
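         		/*
         		 * For TSO, PAYLEN is the TCP payload only (the total
         		 * length minus the Ethernet/IP/TCP headers), while the
         		 * MSS and the TCP header length are packed into the
         		 * context descriptor's MSSIDX field.
         		 */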
   8901 	} else {
   8902 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8903 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8904 	}
   8905 
   8906 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8907 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8908 		cmdc |= NQTXC_CMD_IP4;
   8909 	}
   8910 
   8911 	if (m0->m_pkthdr.csum_flags &
   8912 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8913 		WM_Q_EVCNT_INCR(txq, tusum);
   8914 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8915 			cmdc |= NQTXC_CMD_TCP;
   8916 		else
   8917 			cmdc |= NQTXC_CMD_UDP;
   8918 
   8919 		cmdc |= NQTXC_CMD_IP4;
   8920 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8921 	}
   8922 	if (m0->m_pkthdr.csum_flags &
   8923 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8924 		WM_Q_EVCNT_INCR(txq, tusum6);
   8925 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8926 			cmdc |= NQTXC_CMD_TCP;
   8927 		else
   8928 			cmdc |= NQTXC_CMD_UDP;
   8929 
   8930 		cmdc |= NQTXC_CMD_IP6;
   8931 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8932 	}
   8933 
   8934 	/*
    8935 	 * We don't have to write a context descriptor for every packet to
    8936 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8937 	 * I354, I210 and I211.  It is enough to write one per Tx queue for
    8938 	 * these controllers.
    8939 	 * Writing a context descriptor for every packet adds overhead, but
    8940 	 * it does not cause problems.
   8941 	 */
   8942 	/* Fill in the context descriptor. */
   8943 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   8944 	    htole32(vl_len);
   8945 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   8946 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   8947 	    htole32(cmdc);
   8948 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   8949 	    htole32(mssidx);
   8950 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8951 	DPRINTF(sc, WM_DEBUG_TX,
   8952 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8953 		txq->txq_next, 0, vl_len));
   8954 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8955 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8956 	txs->txs_ndesc++;
   8957 }
   8958 
   8959 /*
   8960  * wm_nq_start:		[ifnet interface function]
   8961  *
   8962  *	Start packet transmission on the interface for NEWQUEUE devices
   8963  */
   8964 static void
   8965 wm_nq_start(struct ifnet *ifp)
   8966 {
   8967 	struct wm_softc *sc = ifp->if_softc;
   8968 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8969 
   8970 	KASSERT(if_is_mpsafe(ifp));
   8971 	/*
   8972 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8973 	 */
   8974 
   8975 	mutex_enter(txq->txq_lock);
   8976 	if (!txq->txq_stopping)
   8977 		wm_nq_start_locked(ifp);
   8978 	mutex_exit(txq->txq_lock);
   8979 }
   8980 
   8981 static void
   8982 wm_nq_start_locked(struct ifnet *ifp)
   8983 {
   8984 	struct wm_softc *sc = ifp->if_softc;
   8985 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8986 
   8987 	wm_nq_send_common_locked(ifp, txq, false);
   8988 }
   8989 
   8990 static int
   8991 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8992 {
   8993 	int qid;
   8994 	struct wm_softc *sc = ifp->if_softc;
   8995 	struct wm_txqueue *txq;
   8996 
   8997 	qid = wm_select_txqueue(ifp, m);
   8998 	txq = &sc->sc_queue[qid].wmq_txq;
   8999 
   9000 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9001 		m_freem(m);
   9002 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9003 		return ENOBUFS;
   9004 	}
   9005 
   9006 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9007 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9008 	if (m->m_flags & M_MCAST)
   9009 		if_statinc_ref(nsr, if_omcasts);
   9010 	IF_STAT_PUTREF(ifp);
   9011 
   9012 	/*
    9013 	 * There are two situations in which this mutex_tryenter() can
    9014 	 * fail at run time:
    9015 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9016 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    9017 	 * In case (1), the last packet enqueued to txq->txq_interq is
    9018 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9019 	 * In case (2), the last packet enqueued to txq->txq_interq is
    9020 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    9021 	 * get stuck either.
   9022 	 */
   9023 	if (mutex_tryenter(txq->txq_lock)) {
   9024 		if (!txq->txq_stopping)
   9025 			wm_nq_transmit_locked(ifp, txq);
   9026 		mutex_exit(txq->txq_lock);
   9027 	}
   9028 
   9029 	return 0;
   9030 }
   9031 
   9032 static void
   9033 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9034 {
   9035 
   9036 	wm_nq_send_common_locked(ifp, txq, true);
   9037 }
   9038 
   9039 static void
   9040 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9041     bool is_transmit)
   9042 {
   9043 	struct wm_softc *sc = ifp->if_softc;
   9044 	struct mbuf *m0;
   9045 	struct wm_txsoft *txs;
   9046 	bus_dmamap_t dmamap;
   9047 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9048 	bool do_csum, sent;
   9049 	bool remap = true;
   9050 
   9051 	KASSERT(mutex_owned(txq->txq_lock));
   9052 	KASSERT(!txq->txq_stopping);
   9053 
   9054 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9055 		return;
   9056 
   9057 	if (__predict_false(wm_linkdown_discard(txq))) {
   9058 		do {
   9059 			if (is_transmit)
   9060 				m0 = pcq_get(txq->txq_interq);
   9061 			else
   9062 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9063 			/*
    9064 			 * Increment the successful packet counter even though
    9065 			 * the packet is discarded because the PHY link is down.
   9066 			 */
   9067 			if (m0 != NULL) {
   9068 				if_statinc(ifp, if_opackets);
   9069 				m_freem(m0);
   9070 			}
   9071 		} while (m0 != NULL);
   9072 		return;
   9073 	}
   9074 
   9075 	sent = false;
   9076 
   9077 	/*
   9078 	 * Loop through the send queue, setting up transmit descriptors
   9079 	 * until we drain the queue, or use up all available transmit
   9080 	 * descriptors.
   9081 	 */
   9082 	for (;;) {
   9083 		m0 = NULL;
   9084 
   9085 		/* Get a work queue entry. */
   9086 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9087 			wm_txeof(txq, UINT_MAX);
   9088 			if (txq->txq_sfree == 0) {
   9089 				DPRINTF(sc, WM_DEBUG_TX,
   9090 				    ("%s: TX: no free job descriptors\n",
   9091 					device_xname(sc->sc_dev)));
   9092 				WM_Q_EVCNT_INCR(txq, txsstall);
   9093 				break;
   9094 			}
   9095 		}
   9096 
   9097 		/* Grab a packet off the queue. */
   9098 		if (is_transmit)
   9099 			m0 = pcq_get(txq->txq_interq);
   9100 		else
   9101 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9102 		if (m0 == NULL)
   9103 			break;
   9104 
   9105 		DPRINTF(sc, WM_DEBUG_TX,
   9106 		    ("%s: TX: have packet to transmit: %p\n",
   9107 			device_xname(sc->sc_dev), m0));
   9108 
   9109 		txs = &txq->txq_soft[txq->txq_snext];
   9110 		dmamap = txs->txs_dmamap;
   9111 
   9112 		/*
   9113 		 * Load the DMA map.  If this fails, the packet either
   9114 		 * didn't fit in the allotted number of segments, or we
   9115 		 * were short on resources.  For the too-many-segments
   9116 		 * case, we simply report an error and drop the packet,
   9117 		 * since we can't sanely copy a jumbo packet to a single
   9118 		 * buffer.
   9119 		 */
   9120 retry:
   9121 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9122 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9123 		if (__predict_false(error)) {
   9124 			if (error == EFBIG) {
   9125 				if (remap == true) {
   9126 					struct mbuf *m;
   9127 
   9128 					remap = false;
   9129 					m = m_defrag(m0, M_NOWAIT);
   9130 					if (m != NULL) {
   9131 						WM_Q_EVCNT_INCR(txq, defrag);
   9132 						m0 = m;
   9133 						goto retry;
   9134 					}
   9135 				}
   9136 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9137 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9138 				    "DMA segments, dropping...\n",
   9139 				    device_xname(sc->sc_dev));
   9140 				wm_dump_mbuf_chain(sc, m0);
   9141 				m_freem(m0);
   9142 				continue;
   9143 			}
   9144 			/* Short on resources, just stop for now. */
   9145 			DPRINTF(sc, WM_DEBUG_TX,
   9146 			    ("%s: TX: dmamap load failed: %d\n",
   9147 				device_xname(sc->sc_dev), error));
   9148 			break;
   9149 		}
   9150 
   9151 		segs_needed = dmamap->dm_nsegs;
   9152 
   9153 		/*
   9154 		 * Ensure we have enough descriptors free to describe
   9155 		 * the packet. Note, we always reserve one descriptor
   9156 		 * at the end of the ring due to the semantics of the
   9157 		 * TDT register, plus one more in the event we need
   9158 		 * to load offload context.
   9159 		 */
   9160 		if (segs_needed > txq->txq_free - 2) {
   9161 			/*
   9162 			 * Not enough free descriptors to transmit this
   9163 			 * packet.  We haven't committed anything yet,
   9164 			 * so just unload the DMA map, put the packet
    9165 			 * back on the queue, and punt.  Notify the upper
   9166 			 * layer that there are no more slots left.
   9167 			 */
   9168 			DPRINTF(sc, WM_DEBUG_TX,
   9169 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9170 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9171 				segs_needed, txq->txq_free - 1));
   9172 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9173 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9174 			WM_Q_EVCNT_INCR(txq, txdstall);
   9175 			break;
   9176 		}
   9177 
   9178 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9179 
   9180 		DPRINTF(sc, WM_DEBUG_TX,
   9181 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9182 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9183 
   9184 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9185 
   9186 		/*
   9187 		 * Store a pointer to the packet so that we can free it
   9188 		 * later.
   9189 		 *
   9190 		 * Initially, we consider the number of descriptors the
    9191 		 * packet uses to be the number of DMA segments.  This may be
   9192 		 * incremented by 1 if we do checksum offload (a descriptor
   9193 		 * is used to set the checksum context).
   9194 		 */
   9195 		txs->txs_mbuf = m0;
   9196 		txs->txs_firstdesc = txq->txq_next;
   9197 		txs->txs_ndesc = segs_needed;
   9198 
   9199 		/* Set up offload parameters for this packet. */
   9200 		uint32_t cmdlen, fields, dcmdlen;
   9201 		if (m0->m_pkthdr.csum_flags &
   9202 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9203 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9204 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9205 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9206 			    &do_csum);
   9207 		} else {
   9208 			do_csum = false;
   9209 			cmdlen = 0;
   9210 			fields = 0;
   9211 		}
   9212 
   9213 		/* Sync the DMA map. */
   9214 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9215 		    BUS_DMASYNC_PREWRITE);
   9216 
   9217 		/* Initialize the first transmit descriptor. */
   9218 		nexttx = txq->txq_next;
   9219 		if (!do_csum) {
   9220 			/* Set up a legacy descriptor */
   9221 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9222 			    dmamap->dm_segs[0].ds_addr);
   9223 			txq->txq_descs[nexttx].wtx_cmdlen =
   9224 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9225 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9226 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9227 			if (vlan_has_tag(m0)) {
   9228 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9229 				    htole32(WTX_CMD_VLE);
   9230 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9231 				    htole16(vlan_get_tag(m0));
   9232 			} else
    9233 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9234 
   9235 			dcmdlen = 0;
   9236 		} else {
   9237 			/* Set up an advanced data descriptor */
   9238 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9239 			    htole64(dmamap->dm_segs[0].ds_addr);
   9240 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9241 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9242 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9243 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9244 			    htole32(fields);
   9245 			DPRINTF(sc, WM_DEBUG_TX,
   9246 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9247 				device_xname(sc->sc_dev), nexttx,
   9248 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9249 			DPRINTF(sc, WM_DEBUG_TX,
   9250 			    ("\t 0x%08x%08x\n", fields,
   9251 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9252 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9253 		}
   9254 
   9255 		lasttx = nexttx;
   9256 		nexttx = WM_NEXTTX(txq, nexttx);
   9257 		/*
    9258 		 * Fill in the remaining descriptors.  The legacy and
    9259 		 * advanced formats share the same layout from here on.
   9260 		 */
   9261 		for (seg = 1; seg < dmamap->dm_nsegs;
   9262 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9263 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9264 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9265 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9266 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9267 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9268 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9269 			lasttx = nexttx;
   9270 
   9271 			DPRINTF(sc, WM_DEBUG_TX,
   9272 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9273 				device_xname(sc->sc_dev), nexttx,
   9274 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9275 				dmamap->dm_segs[seg].ds_len));
   9276 		}
   9277 
   9278 		KASSERT(lasttx != -1);
   9279 
   9280 		/*
   9281 		 * Set up the command byte on the last descriptor of
   9282 		 * the packet. If we're in the interrupt delay window,
   9283 		 * delay the interrupt.
   9284 		 */
   9285 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9286 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
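		/*
		 * The EOP and RS bits have the same encoding in the legacy
		 * and NQ formats (asserted above), so we can set them via
		 * the legacy view of the descriptor.  RS asks the chip to
		 * write back DD (descriptor done) status, which wm_txeof()
		 * checks to reclaim the job.
		 */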
   9287 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9288 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9289 
   9290 		txs->txs_lastdesc = lasttx;
   9291 
   9292 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9293 		    device_xname(sc->sc_dev),
   9294 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9295 
   9296 		/* Sync the descriptors we're using. */
   9297 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9298 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9299 
   9300 		/* Give the packet to the chip. */
   9301 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9302 		sent = true;
   9303 
   9304 		DPRINTF(sc, WM_DEBUG_TX,
   9305 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9306 
   9307 		DPRINTF(sc, WM_DEBUG_TX,
   9308 		    ("%s: TX: finished transmitting packet, job %d\n",
   9309 			device_xname(sc->sc_dev), txq->txq_snext));
   9310 
   9311 		/* Advance the tx pointer. */
   9312 		txq->txq_free -= txs->txs_ndesc;
   9313 		txq->txq_next = nexttx;
   9314 
   9315 		txq->txq_sfree--;
   9316 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9317 
   9318 		/* Pass the packet to any BPF listeners. */
   9319 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9320 	}
   9321 
   9322 	if (m0 != NULL) {
   9323 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9324 		WM_Q_EVCNT_INCR(txq, descdrop);
   9325 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9326 			__func__));
   9327 		m_freem(m0);
   9328 	}
   9329 
   9330 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9331 		/* No more slots; notify upper layer. */
   9332 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9333 	}
   9334 
   9335 	if (sent) {
   9336 		/* Set a watchdog timer in case the chip flakes out. */
   9337 		txq->txq_lastsent = time_uptime;
   9338 		txq->txq_sending = true;
   9339 	}
   9340 }
   9341 
   9342 static void
   9343 wm_deferred_start_locked(struct wm_txqueue *txq)
   9344 {
   9345 	struct wm_softc *sc = txq->txq_sc;
   9346 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9347 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9348 	int qid = wmq->wmq_id;
   9349 
   9350 	KASSERT(mutex_owned(txq->txq_lock));
   9351 	KASSERT(!txq->txq_stopping);
   9352 
   9353 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9354 		/* XXX needed for ALTQ or single-CPU systems */
   9355 		if (qid == 0)
   9356 			wm_nq_start_locked(ifp);
   9357 		wm_nq_transmit_locked(ifp, txq);
   9358 	} else {
    9359 		/* XXX needed for ALTQ or single-CPU systems */
   9360 		if (qid == 0)
   9361 			wm_start_locked(ifp);
   9362 		wm_transmit_locked(ifp, txq);
   9363 	}
   9364 }
   9365 
   9366 /* Interrupt */
   9367 
   9368 /*
   9369  * wm_txeof:
   9370  *
   9371  *	Helper; handle transmit interrupts.
   9372  */
   9373 static bool
   9374 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9375 {
   9376 	struct wm_softc *sc = txq->txq_sc;
   9377 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9378 	struct wm_txsoft *txs;
   9379 	int count = 0;
   9380 	int i;
   9381 	uint8_t status;
   9382 	bool more = false;
   9383 
   9384 	KASSERT(mutex_owned(txq->txq_lock));
   9385 
   9386 	if (txq->txq_stopping)
   9387 		return false;
   9388 
   9389 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9390 
   9391 	/*
   9392 	 * Go through the Tx list and free mbufs for those
   9393 	 * frames which have been transmitted.
   9394 	 */
   9395 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9396 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9397 		txs = &txq->txq_soft[i];
   9398 
   9399 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9400 			device_xname(sc->sc_dev), i));
   9401 
   9402 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9403 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9404 
   9405 		status =
   9406 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
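		/*
		 * If DD (descriptor done) is not set, the chip is still
		 * working on this job; re-sync the descriptor for a later
		 * read and stop scanning.
		 */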
   9407 		if ((status & WTX_ST_DD) == 0) {
   9408 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9409 			    BUS_DMASYNC_PREREAD);
   9410 			break;
   9411 		}
   9412 
   9413 		if (limit-- == 0) {
   9414 			more = true;
   9415 			DPRINTF(sc, WM_DEBUG_TX,
   9416 			    ("%s: TX: loop limited, job %d is not processed\n",
   9417 				device_xname(sc->sc_dev), i));
   9418 			break;
   9419 		}
   9420 
   9421 		count++;
   9422 		DPRINTF(sc, WM_DEBUG_TX,
   9423 		    ("%s: TX: job %d done: descs %d..%d\n",
   9424 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9425 		    txs->txs_lastdesc));
   9426 
   9427 		/*
   9428 		 * XXX We should probably be using the statistics
   9429 		 * XXX registers, but I don't know if they exist
   9430 		 * XXX on chips before the i82544.
   9431 		 */
   9432 
   9433 #ifdef WM_EVENT_COUNTERS
   9434 		if (status & WTX_ST_TU)
   9435 			WM_Q_EVCNT_INCR(txq, underrun);
   9436 #endif /* WM_EVENT_COUNTERS */
   9437 
   9438 		/*
    9439 		 * Documentation for the 82574 and newer says the status field
    9440 		 * has neither the EC (Excessive Collision) bit nor the LC
    9441 		 * (Late Collision) bit; both are reserved. Refer to the "PCIe
    9442 		 * GbE Controller Open Source Software Developer's Manual" and
    9443 		 * the 82574 and newer datasheets.
    9444 		 *
    9445 		 * XXX I saw the LC bit set on an I218 even though the media was
    9446 		 * full duplex, so the bit might have another, undocumented meaning.
   9447 		 */
   9448 
   9449 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9450 		    && ((sc->sc_type < WM_T_82574)
   9451 			|| (sc->sc_type == WM_T_80003))) {
   9452 			if_statinc(ifp, if_oerrors);
   9453 			if (status & WTX_ST_LC)
   9454 				log(LOG_WARNING, "%s: late collision\n",
   9455 				    device_xname(sc->sc_dev));
   9456 			else if (status & WTX_ST_EC) {
   9457 				if_statadd(ifp, if_collisions,
   9458 				    TX_COLLISION_THRESHOLD + 1);
   9459 				log(LOG_WARNING, "%s: excessive collisions\n",
   9460 				    device_xname(sc->sc_dev));
   9461 			}
   9462 		} else
   9463 			if_statinc(ifp, if_opackets);
   9464 
   9465 		txq->txq_packets++;
   9466 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9467 
   9468 		txq->txq_free += txs->txs_ndesc;
   9469 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9470 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9471 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9472 		m_freem(txs->txs_mbuf);
   9473 		txs->txs_mbuf = NULL;
   9474 	}
   9475 
   9476 	/* Update the dirty transmit buffer pointer. */
   9477 	txq->txq_sdirty = i;
   9478 	DPRINTF(sc, WM_DEBUG_TX,
   9479 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9480 
   9481 	if (count != 0)
   9482 		rnd_add_uint32(&sc->rnd_source, count);
   9483 
   9484 	/*
   9485 	 * If there are no more pending transmissions, cancel the watchdog
   9486 	 * timer.
   9487 	 */
   9488 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9489 		txq->txq_sending = false;
   9490 
   9491 	return more;
   9492 }
   9493 
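/*
 * The following inline helpers hide the three Rx descriptor formats
 * handled by this driver: the legacy format, the 82574 extended format
 * and the advanced format used when WM_F_NEWQUEUE is set.  Each accessor
 * reads the corresponding field for the current chip.
 */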
   9494 static inline uint32_t
   9495 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9496 {
   9497 	struct wm_softc *sc = rxq->rxq_sc;
   9498 
   9499 	if (sc->sc_type == WM_T_82574)
   9500 		return EXTRXC_STATUS(
   9501 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9502 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9503 		return NQRXC_STATUS(
   9504 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9505 	else
   9506 		return rxq->rxq_descs[idx].wrx_status;
   9507 }
   9508 
   9509 static inline uint32_t
   9510 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9511 {
   9512 	struct wm_softc *sc = rxq->rxq_sc;
   9513 
   9514 	if (sc->sc_type == WM_T_82574)
   9515 		return EXTRXC_ERROR(
   9516 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9517 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9518 		return NQRXC_ERROR(
   9519 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9520 	else
   9521 		return rxq->rxq_descs[idx].wrx_errors;
   9522 }
   9523 
   9524 static inline uint16_t
   9525 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9526 {
   9527 	struct wm_softc *sc = rxq->rxq_sc;
   9528 
   9529 	if (sc->sc_type == WM_T_82574)
   9530 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9531 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9532 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9533 	else
   9534 		return rxq->rxq_descs[idx].wrx_special;
   9535 }
   9536 
   9537 static inline int
   9538 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9539 {
   9540 	struct wm_softc *sc = rxq->rxq_sc;
   9541 
   9542 	if (sc->sc_type == WM_T_82574)
   9543 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9544 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9545 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9546 	else
   9547 		return rxq->rxq_descs[idx].wrx_len;
   9548 }
   9549 
   9550 #ifdef WM_DEBUG
   9551 static inline uint32_t
   9552 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9553 {
   9554 	struct wm_softc *sc = rxq->rxq_sc;
   9555 
   9556 	if (sc->sc_type == WM_T_82574)
   9557 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9558 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9559 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9560 	else
   9561 		return 0;
   9562 }
   9563 
   9564 static inline uint8_t
   9565 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9566 {
   9567 	struct wm_softc *sc = rxq->rxq_sc;
   9568 
   9569 	if (sc->sc_type == WM_T_82574)
   9570 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9571 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9572 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9573 	else
   9574 		return 0;
   9575 }
   9576 #endif /* WM_DEBUG */
   9577 
   9578 static inline bool
   9579 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9580     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9581 {
   9582 
   9583 	if (sc->sc_type == WM_T_82574)
   9584 		return (status & ext_bit) != 0;
   9585 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9586 		return (status & nq_bit) != 0;
   9587 	else
   9588 		return (status & legacy_bit) != 0;
   9589 }
   9590 
   9591 static inline bool
   9592 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9593     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9594 {
   9595 
   9596 	if (sc->sc_type == WM_T_82574)
   9597 		return (error & ext_bit) != 0;
   9598 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9599 		return (error & nq_bit) != 0;
   9600 	else
   9601 		return (error & legacy_bit) != 0;
   9602 }
   9603 
   9604 static inline bool
   9605 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9606 {
   9607 
   9608 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9609 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9610 		return true;
   9611 	else
   9612 		return false;
   9613 }
   9614 
   9615 static inline bool
   9616 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9617 {
   9618 	struct wm_softc *sc = rxq->rxq_sc;
   9619 
   9620 	/* XXX missing error bit for newqueue? */
   9621 	if (wm_rxdesc_is_set_error(sc, errors,
   9622 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9623 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9624 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9625 		NQRXC_ERROR_RXE)) {
   9626 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9627 		    EXTRXC_ERROR_SE, 0))
   9628 			log(LOG_WARNING, "%s: symbol error\n",
   9629 			    device_xname(sc->sc_dev));
   9630 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9631 		    EXTRXC_ERROR_SEQ, 0))
   9632 			log(LOG_WARNING, "%s: receive sequence error\n",
   9633 			    device_xname(sc->sc_dev));
   9634 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9635 		    EXTRXC_ERROR_CE, 0))
   9636 			log(LOG_WARNING, "%s: CRC error\n",
   9637 			    device_xname(sc->sc_dev));
   9638 		return true;
   9639 	}
   9640 
   9641 	return false;
   9642 }
   9643 
   9644 static inline bool
   9645 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9646 {
   9647 	struct wm_softc *sc = rxq->rxq_sc;
   9648 
   9649 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9650 		NQRXC_STATUS_DD)) {
   9651 		/* We have processed all of the receive descriptors. */
   9652 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9653 		return false;
   9654 	}
   9655 
   9656 	return true;
   9657 }
   9658 
   9659 static inline bool
   9660 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9661     uint16_t vlantag, struct mbuf *m)
   9662 {
   9663 
   9664 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9665 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9666 		vlan_set_tag(m, le16toh(vlantag));
   9667 	}
   9668 
   9669 	return true;
   9670 }
   9671 
   9672 static inline void
   9673 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9674     uint32_t errors, struct mbuf *m)
   9675 {
   9676 	struct wm_softc *sc = rxq->rxq_sc;
   9677 
   9678 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9679 		if (wm_rxdesc_is_set_status(sc, status,
   9680 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9681 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9682 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9683 			if (wm_rxdesc_is_set_error(sc, errors,
   9684 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9685 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9686 		}
   9687 		if (wm_rxdesc_is_set_status(sc, status,
   9688 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9689 			/*
   9690 			 * Note: we don't know if this was TCP or UDP,
   9691 			 * so we just set both bits, and expect the
   9692 			 * upper layers to deal.
   9693 			 */
   9694 			WM_Q_EVCNT_INCR(rxq, tusum);
   9695 			m->m_pkthdr.csum_flags |=
   9696 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9697 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9698 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9699 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9700 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9701 		}
   9702 	}
   9703 }
   9704 
   9705 /*
   9706  * wm_rxeof:
   9707  *
   9708  *	Helper; handle receive interrupts.
   9709  */
   9710 static bool
   9711 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9712 {
   9713 	struct wm_softc *sc = rxq->rxq_sc;
   9714 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9715 	struct wm_rxsoft *rxs;
   9716 	struct mbuf *m;
   9717 	int i, len;
   9718 	int count = 0;
   9719 	uint32_t status, errors;
   9720 	uint16_t vlantag;
   9721 	bool more = false;
   9722 
   9723 	KASSERT(mutex_owned(rxq->rxq_lock));
   9724 
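	/*
	 * Walk the Rx ring starting at rxq_ptr, stopping at the first
	 * descriptor the chip has not completed (DD clear) or when the
	 * per-call work limit is exceeded.
	 */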
   9725 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9726 		rxs = &rxq->rxq_soft[i];
   9727 
   9728 		DPRINTF(sc, WM_DEBUG_RX,
   9729 		    ("%s: RX: checking descriptor %d\n",
   9730 			device_xname(sc->sc_dev), i));
   9731 		wm_cdrxsync(rxq, i,
   9732 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9733 
   9734 		status = wm_rxdesc_get_status(rxq, i);
   9735 		errors = wm_rxdesc_get_errors(rxq, i);
   9736 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9737 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9738 #ifdef WM_DEBUG
   9739 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9740 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9741 #endif
   9742 
   9743 		if (!wm_rxdesc_dd(rxq, i, status))
   9744 			break;
   9745 
   9746 		if (limit-- == 0) {
   9747 			more = true;
   9748 			DPRINTF(sc, WM_DEBUG_RX,
   9749 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9750 				device_xname(sc->sc_dev), i));
   9751 			break;
   9752 		}
   9753 
   9754 		count++;
   9755 		if (__predict_false(rxq->rxq_discard)) {
   9756 			DPRINTF(sc, WM_DEBUG_RX,
   9757 			    ("%s: RX: discarding contents of descriptor %d\n",
   9758 				device_xname(sc->sc_dev), i));
   9759 			wm_init_rxdesc(rxq, i);
   9760 			if (wm_rxdesc_is_eop(rxq, status)) {
   9761 				/* Reset our state. */
   9762 				DPRINTF(sc, WM_DEBUG_RX,
   9763 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9764 					device_xname(sc->sc_dev)));
   9765 				rxq->rxq_discard = 0;
   9766 			}
   9767 			continue;
   9768 		}
   9769 
   9770 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9771 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9772 
   9773 		m = rxs->rxs_mbuf;
   9774 
   9775 		/*
   9776 		 * Add a new receive buffer to the ring, unless of
   9777 		 * course the length is zero. Treat the latter as a
   9778 		 * failed mapping.
   9779 		 */
   9780 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9781 			/*
   9782 			 * Failed, throw away what we've done so
   9783 			 * far, and discard the rest of the packet.
   9784 			 */
   9785 			if_statinc(ifp, if_ierrors);
   9786 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9787 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9788 			wm_init_rxdesc(rxq, i);
   9789 			if (!wm_rxdesc_is_eop(rxq, status))
   9790 				rxq->rxq_discard = 1;
   9791 			if (rxq->rxq_head != NULL)
   9792 				m_freem(rxq->rxq_head);
   9793 			WM_RXCHAIN_RESET(rxq);
   9794 			DPRINTF(sc, WM_DEBUG_RX,
   9795 			    ("%s: RX: Rx buffer allocation failed, "
   9796 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9797 				rxq->rxq_discard ? " (discard)" : ""));
   9798 			continue;
   9799 		}
   9800 
   9801 		m->m_len = len;
   9802 		rxq->rxq_len += len;
   9803 		DPRINTF(sc, WM_DEBUG_RX,
   9804 		    ("%s: RX: buffer at %p len %d\n",
   9805 			device_xname(sc->sc_dev), m->m_data, len));
   9806 
   9807 		/* If this is not the end of the packet, keep looking. */
   9808 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9809 			WM_RXCHAIN_LINK(rxq, m);
   9810 			DPRINTF(sc, WM_DEBUG_RX,
   9811 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9812 				device_xname(sc->sc_dev), rxq->rxq_len));
   9813 			continue;
   9814 		}
   9815 
   9816 		/*
    9817 		 * Okay, we have the entire packet now. The chip is
    9818 		 * configured to include the FCS (not all chips can be
    9819 		 * configured to strip it), so normally we need to trim it,
    9820 		 * except on I35[04] and I21[01]: those chips have an erratum
    9821 		 * whereby the RCTL_SECRC bit in the RCTL register is always
    9822 		 * set, so the FCS is already stripped and we don't trim it.
    9823 		 * PCH2 and newer chips also don't include the FCS when jumbo
    9824 		 * frames are used, to work around an erratum. We may need to
    9825 		 * adjust the previous mbuf's length if this mbuf is too short.
   9826 		 */
   9827 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9828 			if (m->m_len < ETHER_CRC_LEN) {
   9829 				rxq->rxq_tail->m_len
   9830 				    -= (ETHER_CRC_LEN - m->m_len);
   9831 				m->m_len = 0;
   9832 			} else
   9833 				m->m_len -= ETHER_CRC_LEN;
   9834 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9835 		} else
   9836 			len = rxq->rxq_len;
   9837 
   9838 		WM_RXCHAIN_LINK(rxq, m);
   9839 
   9840 		*rxq->rxq_tailp = NULL;
   9841 		m = rxq->rxq_head;
   9842 
   9843 		WM_RXCHAIN_RESET(rxq);
   9844 
   9845 		DPRINTF(sc, WM_DEBUG_RX,
   9846 		    ("%s: RX: have entire packet, len -> %d\n",
   9847 			device_xname(sc->sc_dev), len));
   9848 
   9849 		/* If an error occurred, update stats and drop the packet. */
   9850 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9851 			m_freem(m);
   9852 			continue;
   9853 		}
   9854 
   9855 		/* No errors.  Receive the packet. */
   9856 		m_set_rcvif(m, ifp);
   9857 		m->m_pkthdr.len = len;
   9858 		/*
   9859 		 * TODO
    9860 		 * We should save the rsshash and rsstype to this mbuf.
   9861 		 */
   9862 		DPRINTF(sc, WM_DEBUG_RX,
   9863 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9864 			device_xname(sc->sc_dev), rsstype, rsshash));
   9865 
   9866 		/*
   9867 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9868 		 * for us.  Associate the tag with the packet.
   9869 		 */
   9870 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9871 			continue;
   9872 
   9873 		/* Set up checksum info for this packet. */
   9874 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9875 
   9876 		rxq->rxq_packets++;
   9877 		rxq->rxq_bytes += len;
   9878 		/* Pass it on. */
   9879 		if_percpuq_enqueue(sc->sc_ipq, m);
   9880 
   9881 		if (rxq->rxq_stopping)
   9882 			break;
   9883 	}
   9884 	rxq->rxq_ptr = i;
   9885 
   9886 	if (count != 0)
   9887 		rnd_add_uint32(&sc->rnd_source, count);
   9888 
   9889 	DPRINTF(sc, WM_DEBUG_RX,
   9890 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9891 
   9892 	return more;
   9893 }
   9894 
   9895 /*
   9896  * wm_linkintr_gmii:
   9897  *
   9898  *	Helper; handle link interrupts for GMII.
   9899  */
   9900 static void
   9901 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9902 {
   9903 	device_t dev = sc->sc_dev;
   9904 	uint32_t status, reg;
   9905 	bool link;
   9906 	int rv;
   9907 
   9908 	KASSERT(mutex_owned(sc->sc_core_lock));
   9909 
   9910 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9911 		__func__));
   9912 
   9913 	if ((icr & ICR_LSC) == 0) {
   9914 		if (icr & ICR_RXSEQ)
   9915 			DPRINTF(sc, WM_DEBUG_LINK,
   9916 			    ("%s: LINK Receive sequence error\n",
   9917 				device_xname(dev)));
   9918 		return;
   9919 	}
   9920 
   9921 	/* Link status changed */
   9922 	status = CSR_READ(sc, WMREG_STATUS);
   9923 	link = status & STATUS_LU;
   9924 	if (link) {
   9925 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9926 			device_xname(dev),
   9927 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9928 		if (wm_phy_need_linkdown_discard(sc)) {
   9929 			DPRINTF(sc, WM_DEBUG_LINK,
   9930 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9931 				device_xname(dev)));
   9932 			wm_clear_linkdown_discard(sc);
   9933 		}
   9934 	} else {
   9935 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9936 			device_xname(dev)));
   9937 		if (wm_phy_need_linkdown_discard(sc)) {
   9938 			DPRINTF(sc, WM_DEBUG_LINK,
   9939 			    ("%s: linkintr: Set linkdown discard flag\n",
   9940 				device_xname(dev)));
   9941 			wm_set_linkdown_discard(sc);
   9942 		}
   9943 	}
   9944 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9945 		wm_gig_downshift_workaround_ich8lan(sc);
   9946 
   9947 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9948 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9949 
   9950 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9951 		device_xname(dev)));
   9952 	mii_pollstat(&sc->sc_mii);
   9953 	if (sc->sc_type == WM_T_82543) {
   9954 		int miistatus, active;
   9955 
   9956 		/*
   9957 		 * With 82543, we need to force speed and
   9958 		 * duplex on the MAC equal to what the PHY
   9959 		 * speed and duplex configuration is.
   9960 		 */
   9961 		miistatus = sc->sc_mii.mii_media_status;
   9962 
   9963 		if (miistatus & IFM_ACTIVE) {
   9964 			active = sc->sc_mii.mii_media_active;
   9965 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9966 			switch (IFM_SUBTYPE(active)) {
   9967 			case IFM_10_T:
   9968 				sc->sc_ctrl |= CTRL_SPEED_10;
   9969 				break;
   9970 			case IFM_100_TX:
   9971 				sc->sc_ctrl |= CTRL_SPEED_100;
   9972 				break;
   9973 			case IFM_1000_T:
   9974 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9975 				break;
   9976 			default:
   9977 				/*
   9978 				 * Fiber?
    9979 				 * Should not enter here.
   9980 				 */
   9981 				device_printf(dev, "unknown media (%x)\n",
   9982 				    active);
   9983 				break;
   9984 			}
   9985 			if (active & IFM_FDX)
   9986 				sc->sc_ctrl |= CTRL_FD;
   9987 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9988 		}
   9989 	} else if (sc->sc_type == WM_T_PCH) {
   9990 		wm_k1_gig_workaround_hv(sc,
   9991 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9992 	}
   9993 
   9994 	/*
   9995 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9996 	 * aggressive resulting in many collisions. To avoid this, increase
   9997 	 * the IPG and reduce Rx latency in the PHY.
   9998 	 */
   9999 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10000 	    && link) {
   10001 		uint32_t tipg_reg;
   10002 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10003 		bool fdx;
   10004 		uint16_t emi_addr, emi_val;
   10005 
   10006 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10007 		tipg_reg &= ~TIPG_IPGT_MASK;
   10008 		fdx = status & STATUS_FD;
   10009 
   10010 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10011 			tipg_reg |= 0xff;
   10012 			/* Reduce Rx latency in analog PHY */
   10013 			emi_val = 0;
   10014 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10015 		    fdx && speed != STATUS_SPEED_1000) {
   10016 			tipg_reg |= 0xc;
   10017 			emi_val = 1;
   10018 		} else {
    10019 			/* Roll back to the default values */
   10020 			tipg_reg |= 0x08;
   10021 			emi_val = 1;
   10022 		}
   10023 
   10024 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10025 
   10026 		rv = sc->phy.acquire(sc);
   10027 		if (rv)
   10028 			return;
   10029 
   10030 		if (sc->sc_type == WM_T_PCH2)
   10031 			emi_addr = I82579_RX_CONFIG;
   10032 		else
   10033 			emi_addr = I217_RX_CONFIG;
   10034 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10035 
   10036 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10037 			uint16_t phy_reg;
   10038 
   10039 			sc->phy.readreg_locked(dev, 2,
   10040 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10041 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10042 			if (speed == STATUS_SPEED_100
   10043 			    || speed == STATUS_SPEED_10)
   10044 				phy_reg |= 0x3e8;
   10045 			else
   10046 				phy_reg |= 0xfa;
   10047 			sc->phy.writereg_locked(dev, 2,
   10048 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10049 
   10050 			if (speed == STATUS_SPEED_1000) {
   10051 				sc->phy.readreg_locked(dev, 2,
   10052 				    HV_PM_CTRL, &phy_reg);
   10053 
   10054 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10055 
   10056 				sc->phy.writereg_locked(dev, 2,
   10057 				    HV_PM_CTRL, phy_reg);
   10058 			}
   10059 		}
   10060 		sc->phy.release(sc);
   10061 
   10062 		if (rv)
   10063 			return;
   10064 
   10065 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10066 			uint16_t data, ptr_gap;
   10067 
   10068 			if (speed == STATUS_SPEED_1000) {
   10069 				rv = sc->phy.acquire(sc);
   10070 				if (rv)
   10071 					return;
   10072 
   10073 				rv = sc->phy.readreg_locked(dev, 2,
   10074 				    I82579_UNKNOWN1, &data);
   10075 				if (rv) {
   10076 					sc->phy.release(sc);
   10077 					return;
   10078 				}
   10079 
   10080 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10081 				if (ptr_gap < 0x18) {
   10082 					data &= ~(0x3ff << 2);
   10083 					data |= (0x18 << 2);
   10084 					rv = sc->phy.writereg_locked(dev,
   10085 					    2, I82579_UNKNOWN1, data);
   10086 				}
   10087 				sc->phy.release(sc);
   10088 				if (rv)
   10089 					return;
   10090 			} else {
   10091 				rv = sc->phy.acquire(sc);
   10092 				if (rv)
   10093 					return;
   10094 
   10095 				rv = sc->phy.writereg_locked(dev, 2,
   10096 				    I82579_UNKNOWN1, 0xc023);
   10097 				sc->phy.release(sc);
   10098 				if (rv)
   10099 					return;
   10100 
   10101 			}
   10102 		}
   10103 	}
   10104 
   10105 	/*
   10106 	 * I217 Packet Loss issue:
    10107 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    10108 	 * on power up.
    10109 	 * Set the Beacon Duration for I217 to 8 usec.
   10110 	 */
   10111 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10112 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10113 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10114 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10115 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10116 	}
   10117 
   10118 	/* Work-around I218 hang issue */
   10119 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10120 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10121 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10122 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10123 		wm_k1_workaround_lpt_lp(sc, link);
   10124 
   10125 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10126 		/*
   10127 		 * Set platform power management values for Latency
   10128 		 * Tolerance Reporting (LTR)
   10129 		 */
   10130 		wm_platform_pm_pch_lpt(sc,
   10131 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10132 	}
   10133 
   10134 	/* Clear link partner's EEE ability */
   10135 	sc->eee_lp_ability = 0;
   10136 
   10137 	/* FEXTNVM6 K1-off workaround */
   10138 	if (sc->sc_type == WM_T_PCH_SPT) {
   10139 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10140 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10141 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10142 		else
   10143 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10144 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10145 	}
   10146 
   10147 	if (!link)
   10148 		return;
   10149 
   10150 	switch (sc->sc_type) {
   10151 	case WM_T_PCH2:
   10152 		wm_k1_workaround_lv(sc);
   10153 		/* FALLTHROUGH */
   10154 	case WM_T_PCH:
   10155 		if (sc->sc_phytype == WMPHY_82578)
   10156 			wm_link_stall_workaround_hv(sc);
   10157 		break;
   10158 	default:
   10159 		break;
   10160 	}
   10161 
   10162 	/* Enable/Disable EEE after link up */
   10163 	if (sc->sc_phytype > WMPHY_82579)
   10164 		wm_set_eee_pchlan(sc);
   10165 }
   10166 
   10167 /*
   10168  * wm_linkintr_tbi:
   10169  *
   10170  *	Helper; handle link interrupts for TBI mode.
   10171  */
   10172 static void
   10173 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10174 {
   10175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10176 	uint32_t status;
   10177 
   10178 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10179 		__func__));
   10180 
   10181 	status = CSR_READ(sc, WMREG_STATUS);
   10182 	if (icr & ICR_LSC) {
   10183 		wm_check_for_link(sc);
   10184 		if (status & STATUS_LU) {
   10185 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10186 				device_xname(sc->sc_dev),
   10187 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10188 			/*
   10189 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10190 			 * so we should update sc->sc_ctrl
   10191 			 */
   10192 
   10193 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10194 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10195 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10196 			if (status & STATUS_FD)
   10197 				sc->sc_tctl |=
   10198 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10199 			else
   10200 				sc->sc_tctl |=
   10201 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10202 			if (sc->sc_ctrl & CTRL_TFCE)
   10203 				sc->sc_fcrtl |= FCRTL_XONE;
   10204 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10205 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10206 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10207 			sc->sc_tbi_linkup = 1;
   10208 			if_link_state_change(ifp, LINK_STATE_UP);
   10209 		} else {
   10210 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10211 				device_xname(sc->sc_dev)));
   10212 			sc->sc_tbi_linkup = 0;
   10213 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10214 		}
   10215 		/* Update LED */
   10216 		wm_tbi_serdes_set_linkled(sc);
   10217 	} else if (icr & ICR_RXSEQ)
   10218 		DPRINTF(sc, WM_DEBUG_LINK,
   10219 		    ("%s: LINK: Receive sequence error\n",
   10220 			device_xname(sc->sc_dev)));
   10221 }
   10222 
   10223 /*
   10224  * wm_linkintr_serdes:
   10225  *
    10226  *	Helper; handle link interrupts for SERDES mode.
   10227  */
   10228 static void
   10229 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10230 {
   10231 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10232 	struct mii_data *mii = &sc->sc_mii;
   10233 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10234 	uint32_t pcs_adv, pcs_lpab, reg;
   10235 
   10236 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10237 		__func__));
   10238 
   10239 	if (icr & ICR_LSC) {
   10240 		/* Check PCS */
   10241 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10242 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10243 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10244 				device_xname(sc->sc_dev)));
   10245 			mii->mii_media_status |= IFM_ACTIVE;
   10246 			sc->sc_tbi_linkup = 1;
   10247 			if_link_state_change(ifp, LINK_STATE_UP);
   10248 		} else {
   10249 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10250 				device_xname(sc->sc_dev)));
   10251 			mii->mii_media_status |= IFM_NONE;
   10252 			sc->sc_tbi_linkup = 0;
   10253 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10254 			wm_tbi_serdes_set_linkled(sc);
   10255 			return;
   10256 		}
   10257 		mii->mii_media_active |= IFM_1000_SX;
   10258 		if ((reg & PCS_LSTS_FDX) != 0)
   10259 			mii->mii_media_active |= IFM_FDX;
   10260 		else
   10261 			mii->mii_media_active |= IFM_HDX;
   10262 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10263 			/* Check flow */
   10264 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10265 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10266 				DPRINTF(sc, WM_DEBUG_LINK,
   10267 				    ("XXX LINKOK but not ACOMP\n"));
   10268 				return;
   10269 			}
   10270 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10271 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10272 			DPRINTF(sc, WM_DEBUG_LINK,
   10273 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10274 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10275 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10276 				mii->mii_media_active |= IFM_FLOW
   10277 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10278 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10279 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10280 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10281 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10282 				mii->mii_media_active |= IFM_FLOW
   10283 				    | IFM_ETH_TXPAUSE;
   10284 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10285 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10286 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10287 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10288 				mii->mii_media_active |= IFM_FLOW
   10289 				    | IFM_ETH_RXPAUSE;
   10290 		}
   10291 		/* Update LED */
   10292 		wm_tbi_serdes_set_linkled(sc);
   10293 	} else
   10294 		DPRINTF(sc, WM_DEBUG_LINK,
   10295 		    ("%s: LINK: Receive sequence error\n",
   10296 		    device_xname(sc->sc_dev)));
   10297 }
   10298 
   10299 /*
   10300  * wm_linkintr:
   10301  *
   10302  *	Helper; handle link interrupts.
   10303  */
   10304 static void
   10305 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10306 {
   10307 
   10308 	KASSERT(mutex_owned(sc->sc_core_lock));
   10309 
   10310 	if (sc->sc_flags & WM_F_HAS_MII)
   10311 		wm_linkintr_gmii(sc, icr);
   10312 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10313 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10314 		wm_linkintr_serdes(sc, icr);
   10315 	else
   10316 		wm_linkintr_tbi(sc, icr);
   10317 }
   10318 
   10319 
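/*
 * wm_sched_handle_queue:
 *
 *	Defer further Tx/Rx processing for a queue to either the workqueue
 *	or the per-queue softint.  wmq_wq_enqueued prevents enqueueing the
 *	same work item twice.
 */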
   10320 static inline void
   10321 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10322 {
   10323 
   10324 	if (wmq->wmq_txrx_use_workqueue) {
   10325 		if (!wmq->wmq_wq_enqueued) {
   10326 			wmq->wmq_wq_enqueued = true;
   10327 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10328 			    curcpu());
   10329 		}
   10330 	} else
   10331 		softint_schedule(wmq->wmq_si);
   10332 }
   10333 
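/* Mask all interrupt causes for INTx/MSI (legacy) mode. */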
   10334 static inline void
   10335 wm_legacy_intr_disable(struct wm_softc *sc)
   10336 {
   10337 
   10338 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10339 }
   10340 
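/* Unmask the interrupt causes selected in sc_icr (legacy mode). */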
   10341 static inline void
   10342 wm_legacy_intr_enable(struct wm_softc *sc)
   10343 {
   10344 
   10345 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10346 }
   10347 
   10348 /*
   10349  * wm_intr_legacy:
   10350  *
   10351  *	Interrupt service routine for INTx and MSI.
   10352  */
   10353 static int
   10354 wm_intr_legacy(void *arg)
   10355 {
   10356 	struct wm_softc *sc = arg;
   10357 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10358 	struct wm_queue *wmq = &sc->sc_queue[0];
   10359 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10360 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10361 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10362 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10363 	uint32_t icr, rndval = 0;
   10364 	bool more = false;
   10365 
   10366 	icr = CSR_READ(sc, WMREG_ICR);
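	/*
	 * If none of the causes we enabled is asserted, the interrupt
	 * is not ours (the INTx line may be shared).
	 */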
   10367 	if ((icr & sc->sc_icr) == 0)
   10368 		return 0;
   10369 
   10370 	DPRINTF(sc, WM_DEBUG_TX,
    10371 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10372 	if (rndval == 0)
   10373 		rndval = icr;
   10374 
   10375 	mutex_enter(txq->txq_lock);
   10376 
   10377 	if (txq->txq_stopping) {
   10378 		mutex_exit(txq->txq_lock);
   10379 		return 1;
   10380 	}
   10381 
   10382 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10383 	if (icr & ICR_TXDW) {
   10384 		DPRINTF(sc, WM_DEBUG_TX,
   10385 		    ("%s: TX: got TXDW interrupt\n",
   10386 			device_xname(sc->sc_dev)));
   10387 		WM_Q_EVCNT_INCR(txq, txdw);
   10388 	}
   10389 #endif
   10390 	if (txlimit > 0) {
   10391 		more |= wm_txeof(txq, txlimit);
   10392 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10393 			more = true;
   10394 	} else
   10395 		more = true;
   10396 	mutex_exit(txq->txq_lock);
   10397 
   10398 	mutex_enter(rxq->rxq_lock);
   10399 
   10400 	if (rxq->rxq_stopping) {
   10401 		mutex_exit(rxq->rxq_lock);
   10402 		return 1;
   10403 	}
   10404 
   10405 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10406 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10407 		DPRINTF(sc, WM_DEBUG_RX,
   10408 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10409 			device_xname(sc->sc_dev),
   10410 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10411 		WM_Q_EVCNT_INCR(rxq, intr);
   10412 	}
   10413 #endif
   10414 	if (rxlimit > 0) {
   10415 		/*
   10416 		 * wm_rxeof() does *not* call upper layer functions directly,
    10417 		 * as if_percpuq_enqueue() just calls softint_schedule().
   10418 		 * So, we can call wm_rxeof() in interrupt context.
   10419 		 */
   10420 		more = wm_rxeof(rxq, rxlimit);
   10421 	} else
   10422 		more = true;
   10423 
   10424 	mutex_exit(rxq->rxq_lock);
   10425 
   10426 	mutex_enter(sc->sc_core_lock);
   10427 
   10428 	if (sc->sc_core_stopping) {
   10429 		mutex_exit(sc->sc_core_lock);
   10430 		return 1;
   10431 	}
   10432 
   10433 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10434 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10435 		wm_linkintr(sc, icr);
   10436 	}
   10437 	if ((icr & ICR_GPI(0)) != 0)
   10438 		device_printf(sc->sc_dev, "got module interrupt\n");
   10439 
   10440 	mutex_exit(sc->sc_core_lock);
   10441 
   10442 	if (icr & ICR_RXO) {
   10443 #if defined(WM_DEBUG)
   10444 		log(LOG_WARNING, "%s: Receive overrun\n",
   10445 		    device_xname(sc->sc_dev));
   10446 #endif /* defined(WM_DEBUG) */
   10447 	}
   10448 
   10449 	rnd_add_uint32(&sc->rnd_source, rndval);
   10450 
   10451 	if (more) {
   10452 		/* Try to get more packets going. */
   10453 		wm_legacy_intr_disable(sc);
   10454 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10455 		wm_sched_handle_queue(sc, wmq);
   10456 	}
   10457 
   10458 	return 1;
   10459 }
   10460 
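/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupt.  The mask register and bit
 *	layout differ among the 82574, the 82575 and later chips.
 */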
   10461 static inline void
   10462 wm_txrxintr_disable(struct wm_queue *wmq)
   10463 {
   10464 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10465 
   10466 	if (__predict_false(!wm_is_using_msix(sc))) {
   10467 		wm_legacy_intr_disable(sc);
   10468 		return;
   10469 	}
   10470 
   10471 	if (sc->sc_type == WM_T_82574)
   10472 		CSR_WRITE(sc, WMREG_IMC,
   10473 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10474 	else if (sc->sc_type == WM_T_82575)
   10475 		CSR_WRITE(sc, WMREG_EIMC,
   10476 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10477 	else
   10478 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10479 }
   10480 
   10481 static inline void
   10482 wm_txrxintr_enable(struct wm_queue *wmq)
   10483 {
   10484 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10485 
   10486 	wm_itrs_calculate(sc, wmq);
   10487 
   10488 	if (__predict_false(!wm_is_using_msix(sc))) {
   10489 		wm_legacy_intr_enable(sc);
   10490 		return;
   10491 	}
   10492 
   10493 	/*
    10494 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    10495 	 * here.  It doesn't matter whether RXQ(0) or RXQ(1) enables
    10496 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    10497 	 * while its wm_handle_queue(wmq) is running.
   10498 	 */
   10499 	if (sc->sc_type == WM_T_82574)
   10500 		CSR_WRITE(sc, WMREG_IMS,
   10501 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10502 	else if (sc->sc_type == WM_T_82575)
   10503 		CSR_WRITE(sc, WMREG_EIMS,
   10504 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10505 	else
   10506 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10507 }
   10508 
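/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx queue pair of an MSI-X
 *	vector.  The queue's interrupt stays masked while this handler
 *	(or the deferred wm_handle_queue()) runs, and is re-enabled only
 *	when no more work remains.
 */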
   10509 static int
   10510 wm_txrxintr_msix(void *arg)
   10511 {
   10512 	struct wm_queue *wmq = arg;
   10513 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10514 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10515 	struct wm_softc *sc = txq->txq_sc;
   10516 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10517 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10518 	bool txmore;
   10519 	bool rxmore;
   10520 
   10521 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10522 
   10523 	DPRINTF(sc, WM_DEBUG_TX,
   10524 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10525 
   10526 	wm_txrxintr_disable(wmq);
   10527 
   10528 	mutex_enter(txq->txq_lock);
   10529 
   10530 	if (txq->txq_stopping) {
   10531 		mutex_exit(txq->txq_lock);
   10532 		return 1;
   10533 	}
   10534 
   10535 	WM_Q_EVCNT_INCR(txq, txdw);
   10536 	if (txlimit > 0) {
   10537 		txmore = wm_txeof(txq, txlimit);
   10538 		/* wm_deferred start() is done in wm_handle_queue(). */
   10539 	} else
   10540 		txmore = true;
   10541 	mutex_exit(txq->txq_lock);
   10542 
   10543 	DPRINTF(sc, WM_DEBUG_RX,
   10544 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10545 	mutex_enter(rxq->rxq_lock);
   10546 
   10547 	if (rxq->rxq_stopping) {
   10548 		mutex_exit(rxq->rxq_lock);
   10549 		return 1;
   10550 	}
   10551 
   10552 	WM_Q_EVCNT_INCR(rxq, intr);
   10553 	if (rxlimit > 0) {
   10554 		rxmore = wm_rxeof(rxq, rxlimit);
   10555 	} else
   10556 		rxmore = true;
   10557 	mutex_exit(rxq->rxq_lock);
   10558 
   10559 	wm_itrs_writereg(sc, wmq);
   10560 
   10561 	if (txmore || rxmore) {
   10562 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10563 		wm_sched_handle_queue(sc, wmq);
   10564 	} else
   10565 		wm_txrxintr_enable(wmq);
   10566 
   10567 	return 1;
   10568 }
   10569 
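/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler which continues the Tx/Rx processing
 *	deferred from wm_txrxintr_msix() or wm_intr_legacy(), using the
 *	separate (non-interrupt) process limits.
 */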
   10570 static void
   10571 wm_handle_queue(void *arg)
   10572 {
   10573 	struct wm_queue *wmq = arg;
   10574 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10575 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10576 	struct wm_softc *sc = txq->txq_sc;
   10577 	u_int txlimit = sc->sc_tx_process_limit;
   10578 	u_int rxlimit = sc->sc_rx_process_limit;
   10579 	bool txmore;
   10580 	bool rxmore;
   10581 
   10582 	mutex_enter(txq->txq_lock);
   10583 	if (txq->txq_stopping) {
   10584 		mutex_exit(txq->txq_lock);
   10585 		return;
   10586 	}
   10587 	txmore = wm_txeof(txq, txlimit);
   10588 	wm_deferred_start_locked(txq);
   10589 	mutex_exit(txq->txq_lock);
   10590 
   10591 	mutex_enter(rxq->rxq_lock);
   10592 	if (rxq->rxq_stopping) {
   10593 		mutex_exit(rxq->rxq_lock);
   10594 		return;
   10595 	}
   10596 	WM_Q_EVCNT_INCR(rxq, defer);
   10597 	rxmore = wm_rxeof(rxq, rxlimit);
   10598 	mutex_exit(rxq->rxq_lock);
   10599 
   10600 	if (txmore || rxmore) {
   10601 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10602 		wm_sched_handle_queue(sc, wmq);
   10603 	} else
   10604 		wm_txrxintr_enable(wmq);
   10605 }
   10606 
   10607 static void
   10608 wm_handle_queue_work(struct work *wk, void *context)
   10609 {
   10610 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10611 
   10612 	/*
    10613 	 * Workaround for some qemu environments, which don't stop the
    10614 	 * interrupt immediately.
   10615 	 */
   10616 	wmq->wmq_wq_enqueued = false;
   10617 	wm_handle_queue(wmq);
   10618 }
   10619 
   10620 /*
   10621  * wm_linkintr_msix:
   10622  *
   10623  *	Interrupt service routine for link status change for MSI-X.
   10624  */
   10625 static int
   10626 wm_linkintr_msix(void *arg)
   10627 {
   10628 	struct wm_softc *sc = arg;
   10629 	uint32_t reg;
    10630 	bool has_rxo = false;
   10631 
   10632 	reg = CSR_READ(sc, WMREG_ICR);
   10633 	mutex_enter(sc->sc_core_lock);
   10634 	DPRINTF(sc, WM_DEBUG_LINK,
   10635 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10636 		device_xname(sc->sc_dev), reg));
   10637 
   10638 	if (sc->sc_core_stopping)
   10639 		goto out;
   10640 
   10641 	if ((reg & ICR_LSC) != 0) {
   10642 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10643 		wm_linkintr(sc, ICR_LSC);
   10644 	}
   10645 	if ((reg & ICR_GPI(0)) != 0)
   10646 		device_printf(sc->sc_dev, "got module interrupt\n");
   10647 
   10648 	/*
   10649 	 * XXX 82574 MSI-X mode workaround
   10650 	 *
   10651 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
    10652 	 * 82574 MSI-X mode raises the receive overrun (RXO) interrupt on
    10653 	 * the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor
    10654 	 * the ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
   10655 	 */
   10656 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10657 #if defined(WM_DEBUG)
   10658 		log(LOG_WARNING, "%s: Receive overrun\n",
   10659 		    device_xname(sc->sc_dev));
   10660 #endif /* defined(WM_DEBUG) */
   10661 
   10662 		has_rxo = true;
   10663 		/*
    10664 		 * The RXO interrupt fires at a very high rate when receive
    10665 		 * traffic is heavy.  We use polling mode for ICR_OTHER like Tx/Rx
   10666 		 * interrupts. ICR_OTHER will be enabled at the end of
   10667 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   10668 		 * ICR_RXQ(1) interrupts.
   10669 		 */
   10670 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10671 
   10672 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10673 	}
   10674 
   10677 out:
   10678 	mutex_exit(sc->sc_core_lock);
   10679 
   10680 	if (sc->sc_type == WM_T_82574) {
   10681 		if (!has_rxo)
   10682 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10683 		else
   10684 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10685 	} else if (sc->sc_type == WM_T_82575)
   10686 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10687 	else
   10688 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10689 
   10690 	return 1;
   10691 }
   10692 
   10693 /*
   10694  * Media related.
   10695  * GMII, SGMII, TBI (and SERDES)
   10696  */
   10697 
   10698 /* Common */
   10699 
   10700 /*
   10701  * wm_tbi_serdes_set_linkled:
   10702  *
   10703  *	Update the link LED on TBI and SERDES devices.
   10704  */
   10705 static void
   10706 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10707 {
   10708 
   10709 	if (sc->sc_tbi_linkup)
   10710 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10711 	else
   10712 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10713 
   10714 	/* 82540 or newer devices are active low */
   10715 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10716 
   10717 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10718 }
   10719 
   10720 /* GMII related */
   10721 
   10722 /*
   10723  * wm_gmii_reset:
   10724  *
   10725  *	Reset the PHY.
   10726  */
   10727 static void
   10728 wm_gmii_reset(struct wm_softc *sc)
   10729 {
   10730 	uint32_t reg;
   10731 	int rv;
   10732 
   10733 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10734 		device_xname(sc->sc_dev), __func__));
   10735 
   10736 	rv = sc->phy.acquire(sc);
   10737 	if (rv != 0) {
   10738 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10739 		    __func__);
   10740 		return;
   10741 	}
   10742 
   10743 	switch (sc->sc_type) {
   10744 	case WM_T_82542_2_0:
   10745 	case WM_T_82542_2_1:
   10746 		/* null */
   10747 		break;
   10748 	case WM_T_82543:
   10749 		/*
   10750 		 * With 82543, we need to force speed and duplex on the MAC
   10751 		 * equal to what the PHY speed and duplex configuration is.
   10752 		 * In addition, we need to perform a hardware reset on the PHY
   10753 		 * to take it out of reset.
   10754 		 */
   10755 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10756 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10757 
   10758 		/* The PHY reset pin is active-low. */
   10759 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10760 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10761 		    CTRL_EXT_SWDPIN(4));
   10762 		reg |= CTRL_EXT_SWDPIO(4);
   10763 
   10764 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10765 		CSR_WRITE_FLUSH(sc);
   10766 		delay(10*1000);
   10767 
   10768 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10769 		CSR_WRITE_FLUSH(sc);
   10770 		delay(150);
   10771 #if 0
   10772 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10773 #endif
   10774 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10775 		break;
   10776 	case WM_T_82544:	/* Reset 10000us */
   10777 	case WM_T_82540:
   10778 	case WM_T_82545:
   10779 	case WM_T_82545_3:
   10780 	case WM_T_82546:
   10781 	case WM_T_82546_3:
   10782 	case WM_T_82541:
   10783 	case WM_T_82541_2:
   10784 	case WM_T_82547:
   10785 	case WM_T_82547_2:
   10786 	case WM_T_82571:	/* Reset 100us */
   10787 	case WM_T_82572:
   10788 	case WM_T_82573:
   10789 	case WM_T_82574:
   10790 	case WM_T_82575:
   10791 	case WM_T_82576:
   10792 	case WM_T_82580:
   10793 	case WM_T_I350:
   10794 	case WM_T_I354:
   10795 	case WM_T_I210:
   10796 	case WM_T_I211:
   10797 	case WM_T_82583:
   10798 	case WM_T_80003:
   10799 		/* Generic reset */
   10800 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10801 		CSR_WRITE_FLUSH(sc);
   10802 		delay(20000);
   10803 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10804 		CSR_WRITE_FLUSH(sc);
   10805 		delay(20000);
   10806 
   10807 		if ((sc->sc_type == WM_T_82541)
   10808 		    || (sc->sc_type == WM_T_82541_2)
   10809 		    || (sc->sc_type == WM_T_82547)
   10810 		    || (sc->sc_type == WM_T_82547_2)) {
    10811 			/* Workarounds for IGP are done in igp_reset() */
   10812 			/* XXX add code to set LED after phy reset */
   10813 		}
   10814 		break;
   10815 	case WM_T_ICH8:
   10816 	case WM_T_ICH9:
   10817 	case WM_T_ICH10:
   10818 	case WM_T_PCH:
   10819 	case WM_T_PCH2:
   10820 	case WM_T_PCH_LPT:
   10821 	case WM_T_PCH_SPT:
   10822 	case WM_T_PCH_CNP:
   10823 		/* Generic reset */
   10824 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10825 		CSR_WRITE_FLUSH(sc);
   10826 		delay(100);
   10827 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10828 		CSR_WRITE_FLUSH(sc);
   10829 		delay(150);
   10830 		break;
   10831 	default:
   10832 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10833 		    __func__);
   10834 		break;
   10835 	}
   10836 
   10837 	sc->phy.release(sc);
   10838 
   10839 	/* get_cfg_done */
   10840 	wm_get_cfg_done(sc);
   10841 
   10842 	/* Extra setup */
   10843 	switch (sc->sc_type) {
   10844 	case WM_T_82542_2_0:
   10845 	case WM_T_82542_2_1:
   10846 	case WM_T_82543:
   10847 	case WM_T_82544:
   10848 	case WM_T_82540:
   10849 	case WM_T_82545:
   10850 	case WM_T_82545_3:
   10851 	case WM_T_82546:
   10852 	case WM_T_82546_3:
   10853 	case WM_T_82541_2:
   10854 	case WM_T_82547_2:
   10855 	case WM_T_82571:
   10856 	case WM_T_82572:
   10857 	case WM_T_82573:
   10858 	case WM_T_82574:
   10859 	case WM_T_82583:
   10860 	case WM_T_82575:
   10861 	case WM_T_82576:
   10862 	case WM_T_82580:
   10863 	case WM_T_I350:
   10864 	case WM_T_I354:
   10865 	case WM_T_I210:
   10866 	case WM_T_I211:
   10867 	case WM_T_80003:
   10868 		/* Null */
   10869 		break;
   10870 	case WM_T_82541:
   10871 	case WM_T_82547:
    10872 		/* XXX Actively configure the LED after PHY reset */
   10873 		break;
   10874 	case WM_T_ICH8:
   10875 	case WM_T_ICH9:
   10876 	case WM_T_ICH10:
   10877 	case WM_T_PCH:
   10878 	case WM_T_PCH2:
   10879 	case WM_T_PCH_LPT:
   10880 	case WM_T_PCH_SPT:
   10881 	case WM_T_PCH_CNP:
   10882 		wm_phy_post_reset(sc);
   10883 		break;
   10884 	default:
   10885 		panic("%s: unknown type\n", __func__);
   10886 		break;
   10887 	}
   10888 }
   10889 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected. To select the correct read/write functions, the PCI ID or
 * the MAC type is required, without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * the PCI ID or the MAC type is checked. The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still not be perfect because some entries are
 * missing from the comparison, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   10908 static void
   10909 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10910     uint16_t phy_model)
   10911 {
   10912 	device_t dev = sc->sc_dev;
   10913 	struct mii_data *mii = &sc->sc_mii;
   10914 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10915 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10916 	mii_readreg_t new_readreg;
   10917 	mii_writereg_t new_writereg;
   10918 	bool dodiag = true;
   10919 
   10920 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10921 		device_xname(sc->sc_dev), __func__));
   10922 
	/*
	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is
	 * always incorrect, so don't print diagnostic output on the
	 * second call.
	 */
   10927 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10928 		dodiag = false;
   10929 
   10930 	if (mii->mii_readreg == NULL) {
   10931 		/*
   10932 		 *  This is the first call of this function. For ICH and PCH
   10933 		 * variants, it's difficult to determine the PHY access method
   10934 		 * by sc_type, so use the PCI product ID for some devices.
   10935 		 */
   10936 
   10937 		switch (sc->sc_pcidevid) {
   10938 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10939 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10940 			/* 82577 */
   10941 			new_phytype = WMPHY_82577;
   10942 			break;
   10943 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10944 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10945 			/* 82578 */
   10946 			new_phytype = WMPHY_82578;
   10947 			break;
   10948 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10949 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10950 			/* 82579 */
   10951 			new_phytype = WMPHY_82579;
   10952 			break;
   10953 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10954 		case PCI_PRODUCT_INTEL_82801I_BM:
   10955 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10956 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10957 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10958 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10959 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10960 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10961 			/* ICH8, 9, 10 with 82567 */
   10962 			new_phytype = WMPHY_BM;
   10963 			break;
   10964 		default:
   10965 			break;
   10966 		}
   10967 	} else {
   10968 		/* It's not the first call. Use PHY OUI and model */
   10969 		switch (phy_oui) {
   10970 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10971 			switch (phy_model) {
   10972 			case MII_MODEL_ATTANSIC_AR8021:
   10973 				new_phytype = WMPHY_82578;
   10974 				break;
   10975 			default:
   10976 				break;
   10977 			}
   10978 			break;
   10979 		case MII_OUI_xxMARVELL:
   10980 			switch (phy_model) {
   10981 			case MII_MODEL_xxMARVELL_I210:
   10982 				new_phytype = WMPHY_I210;
   10983 				break;
   10984 			case MII_MODEL_xxMARVELL_E1011:
   10985 			case MII_MODEL_xxMARVELL_E1000_3:
   10986 			case MII_MODEL_xxMARVELL_E1000_5:
   10987 			case MII_MODEL_xxMARVELL_E1112:
   10988 				new_phytype = WMPHY_M88;
   10989 				break;
   10990 			case MII_MODEL_xxMARVELL_E1149:
   10991 				new_phytype = WMPHY_BM;
   10992 				break;
   10993 			case MII_MODEL_xxMARVELL_E1111:
   10994 			case MII_MODEL_xxMARVELL_I347:
   10995 			case MII_MODEL_xxMARVELL_E1512:
   10996 			case MII_MODEL_xxMARVELL_E1340M:
   10997 			case MII_MODEL_xxMARVELL_E1543:
   10998 				new_phytype = WMPHY_M88;
   10999 				break;
   11000 			case MII_MODEL_xxMARVELL_I82563:
   11001 				new_phytype = WMPHY_GG82563;
   11002 				break;
   11003 			default:
   11004 				break;
   11005 			}
   11006 			break;
   11007 		case MII_OUI_INTEL:
   11008 			switch (phy_model) {
   11009 			case MII_MODEL_INTEL_I82577:
   11010 				new_phytype = WMPHY_82577;
   11011 				break;
   11012 			case MII_MODEL_INTEL_I82579:
   11013 				new_phytype = WMPHY_82579;
   11014 				break;
   11015 			case MII_MODEL_INTEL_I217:
   11016 				new_phytype = WMPHY_I217;
   11017 				break;
   11018 			case MII_MODEL_INTEL_I82580:
   11019 				new_phytype = WMPHY_82580;
   11020 				break;
   11021 			case MII_MODEL_INTEL_I350:
   11022 				new_phytype = WMPHY_I350;
   11023 				break;
   11024 			default:
   11025 				break;
   11026 			}
   11027 			break;
   11028 		case MII_OUI_yyINTEL:
   11029 			switch (phy_model) {
   11030 			case MII_MODEL_yyINTEL_I82562G:
   11031 			case MII_MODEL_yyINTEL_I82562EM:
   11032 			case MII_MODEL_yyINTEL_I82562ET:
   11033 				new_phytype = WMPHY_IFE;
   11034 				break;
   11035 			case MII_MODEL_yyINTEL_IGP01E1000:
   11036 				new_phytype = WMPHY_IGP;
   11037 				break;
   11038 			case MII_MODEL_yyINTEL_I82566:
   11039 				new_phytype = WMPHY_IGP_3;
   11040 				break;
   11041 			default:
   11042 				break;
   11043 			}
   11044 			break;
   11045 		default:
   11046 			break;
   11047 		}
   11048 
   11049 		if (dodiag) {
   11050 			if (new_phytype == WMPHY_UNKNOWN)
   11051 				aprint_verbose_dev(dev,
   11052 				    "%s: Unknown PHY model. OUI=%06x, "
   11053 				    "model=%04x\n", __func__, phy_oui,
   11054 				    phy_model);
   11055 
   11056 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11057 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   11061 			}
   11062 		}
   11063 	}
   11064 
   11065 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11066 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11067 		/* SGMII */
   11068 		new_readreg = wm_sgmii_readreg;
   11069 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   11071 		/* BM2 (phyaddr == 1) */
   11072 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11073 		    && (new_phytype != WMPHY_BM)
   11074 		    && (new_phytype != WMPHY_UNKNOWN))
   11075 			doubt_phytype = new_phytype;
   11076 		new_phytype = WMPHY_BM;
   11077 		new_readreg = wm_gmii_bm_readreg;
   11078 		new_writereg = wm_gmii_bm_writereg;
   11079 	} else if (sc->sc_type >= WM_T_PCH) {
   11080 		/* All PCH* use _hv_ */
   11081 		new_readreg = wm_gmii_hv_readreg;
   11082 		new_writereg = wm_gmii_hv_writereg;
   11083 	} else if (sc->sc_type >= WM_T_ICH8) {
   11084 		/* non-82567 ICH8, 9 and 10 */
   11085 		new_readreg = wm_gmii_i82544_readreg;
   11086 		new_writereg = wm_gmii_i82544_writereg;
   11087 	} else if (sc->sc_type >= WM_T_80003) {
   11088 		/* 80003 */
   11089 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11090 		    && (new_phytype != WMPHY_GG82563)
   11091 		    && (new_phytype != WMPHY_UNKNOWN))
   11092 			doubt_phytype = new_phytype;
   11093 		new_phytype = WMPHY_GG82563;
   11094 		new_readreg = wm_gmii_i80003_readreg;
   11095 		new_writereg = wm_gmii_i80003_writereg;
   11096 	} else if (sc->sc_type >= WM_T_I210) {
   11097 		/* I210 and I211 */
   11098 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11099 		    && (new_phytype != WMPHY_I210)
   11100 		    && (new_phytype != WMPHY_UNKNOWN))
   11101 			doubt_phytype = new_phytype;
   11102 		new_phytype = WMPHY_I210;
   11103 		new_readreg = wm_gmii_gs40g_readreg;
   11104 		new_writereg = wm_gmii_gs40g_writereg;
   11105 	} else if (sc->sc_type >= WM_T_82580) {
   11106 		/* 82580, I350 and I354 */
   11107 		new_readreg = wm_gmii_82580_readreg;
   11108 		new_writereg = wm_gmii_82580_writereg;
   11109 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11111 		new_readreg = wm_gmii_i82544_readreg;
   11112 		new_writereg = wm_gmii_i82544_writereg;
   11113 	} else {
   11114 		new_readreg = wm_gmii_i82543_readreg;
   11115 		new_writereg = wm_gmii_i82543_writereg;
   11116 	}
   11117 
   11118 	if (new_phytype == WMPHY_BM) {
   11119 		/* All BM use _bm_ */
   11120 		new_readreg = wm_gmii_bm_readreg;
   11121 		new_writereg = wm_gmii_bm_writereg;
   11122 	}
   11123 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11124 		/* All PCH* use _hv_ */
   11125 		new_readreg = wm_gmii_hv_readreg;
   11126 		new_writereg = wm_gmii_hv_writereg;
   11127 	}
   11128 
   11129 	/* Diag output */
   11130 	if (dodiag) {
   11131 		if (doubt_phytype != WMPHY_UNKNOWN)
   11132 			aprint_error_dev(dev, "Assumed new PHY type was "
   11133 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11134 			    new_phytype);
   11135 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11136 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
			    sc->sc_phytype, new_phytype);
   11140 
   11141 		if ((mii->mii_readreg != NULL) &&
   11142 		    (new_phytype == WMPHY_UNKNOWN))
   11143 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11144 
   11145 		if ((mii->mii_readreg != NULL) &&
   11146 		    (mii->mii_readreg != new_readreg))
   11147 			aprint_error_dev(dev, "Previously assumed PHY "
   11148 			    "read/write function was incorrect.\n");
   11149 	}
   11150 
   11151 	/* Update now */
   11152 	sc->sc_phytype = new_phytype;
   11153 	mii->mii_readreg = new_readreg;
   11154 	mii->mii_writereg = new_writereg;
   11155 	if (new_readreg == wm_gmii_hv_readreg) {
   11156 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11157 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11158 	} else if (new_readreg == wm_sgmii_readreg) {
   11159 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11160 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11161 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11162 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11163 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11164 	}
   11165 }
   11166 
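/*
 * Illustrative sketch (not compiled into the driver): how the two-pass
 * detection above is driven.  The first call passes zeroes because no PHY
 * has been probed yet, so only the PCI ID or MAC type can be used; the
 * second call (see wm_gmii_mediainit()) passes the OUI and model read
 * from the attached child PHY.
 */
#if 0
	/* Illustrative only: assumes sc and a probed child PHY in scope. */
	wm_gmii_setup_phytype(sc, 0, 0);  /* 1st: guess from PCI ID/MAC type */
	/* ... mii_attach() probes the PHY ... */
	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,	/* 2nd: refine */
	    child->mii_mpd_model);
#endif
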
   11167 /*
   11168  * wm_get_phy_id_82575:
   11169  *
 * Return the PHY ID, or -1 if it failed.
   11171  */
   11172 static int
   11173 wm_get_phy_id_82575(struct wm_softc *sc)
   11174 {
   11175 	uint32_t reg;
   11176 	int phyid = -1;
   11177 
   11178 	/* XXX */
   11179 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11180 		return -1;
   11181 
   11182 	if (wm_sgmii_uses_mdio(sc)) {
   11183 		switch (sc->sc_type) {
   11184 		case WM_T_82575:
   11185 		case WM_T_82576:
   11186 			reg = CSR_READ(sc, WMREG_MDIC);
   11187 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11188 			break;
   11189 		case WM_T_82580:
   11190 		case WM_T_I350:
   11191 		case WM_T_I354:
   11192 		case WM_T_I210:
   11193 		case WM_T_I211:
   11194 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11195 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11196 			break;
   11197 		default:
   11198 			return -1;
   11199 		}
   11200 	}
   11201 
   11202 	return phyid;
   11203 }
   11204 
   11205 /*
   11206  * wm_gmii_mediainit:
   11207  *
   11208  *	Initialize media for use on 1000BASE-T devices.
   11209  */
   11210 static void
   11211 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11212 {
   11213 	device_t dev = sc->sc_dev;
   11214 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11215 	struct mii_data *mii = &sc->sc_mii;
   11216 
   11217 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11218 		device_xname(sc->sc_dev), __func__));
   11219 
   11220 	/* We have GMII. */
   11221 	sc->sc_flags |= WM_F_HAS_MII;
   11222 
   11223 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11225 	else
   11226 		sc->sc_tipg = TIPG_1000T_DFLT;
   11227 
   11228 	/*
   11229 	 * Let the chip set speed/duplex on its own based on
   11230 	 * signals from the PHY.
   11231 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11232 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11233 	 */
   11234 	sc->sc_ctrl |= CTRL_SLU;
   11235 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11236 
   11237 	/* Initialize our media structures and probe the GMII. */
   11238 	mii->mii_ifp = ifp;
   11239 
   11240 	mii->mii_statchg = wm_gmii_statchg;
   11241 
	/* Move PHY control from SMBus to PCIe */
   11243 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11244 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11245 	    || (sc->sc_type == WM_T_PCH_CNP))
   11246 		wm_init_phy_workarounds_pchlan(sc);
   11247 
   11248 	wm_gmii_reset(sc);
   11249 
   11250 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11251 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11252 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11253 
   11254 	/* Setup internal SGMII PHY for SFP */
   11255 	wm_sgmii_sfp_preconfig(sc);
   11256 
   11257 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11258 	    || (sc->sc_type == WM_T_82580)
   11259 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11260 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11261 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11262 			/* Attach only one port */
   11263 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11264 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11265 		} else {
   11266 			int i, id;
   11267 			uint32_t ctrl_ext;
   11268 
   11269 			id = wm_get_phy_id_82575(sc);
   11270 			if (id != -1) {
   11271 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11272 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11273 			}
   11274 			if ((id == -1)
   11275 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   11277 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11278 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11279 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11280 				CSR_WRITE_FLUSH(sc);
   11281 				delay(300*1000); /* XXX too long */
   11282 
				/*
				 * Scan PHY addresses 1 to 7.
				 *
				 * I2C access can fail with the I2C
				 * register's ERROR bit set, so suppress
				 * error messages while scanning.
				 */
   11290 				sc->phy.no_errprint = true;
   11291 				for (i = 1; i < 8; i++)
   11292 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11293 					    0xffffffff, i, MII_OFFSET_ANY,
   11294 					    MIIF_DOPAUSE);
   11295 				sc->phy.no_errprint = false;
   11296 
   11297 				/* Restore previous sfp cage power state */
   11298 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11299 			}
   11300 		}
   11301 	} else
   11302 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11303 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11304 
   11305 	/*
   11306 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11307 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11308 	 */
   11309 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11310 		|| (sc->sc_type == WM_T_PCH_SPT)
   11311 		|| (sc->sc_type == WM_T_PCH_CNP))
   11312 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11313 		wm_set_mdio_slow_mode_hv(sc);
   11314 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11315 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11316 	}
   11317 
   11318 	/*
   11319 	 * (For ICH8 variants)
   11320 	 * If PHY detection failed, use BM's r/w function and retry.
   11321 	 */
   11322 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11323 		/* if failed, retry with *_bm_* */
   11324 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11325 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11326 		    sc->sc_phytype);
   11327 		sc->sc_phytype = WMPHY_BM;
   11328 		mii->mii_readreg = wm_gmii_bm_readreg;
   11329 		mii->mii_writereg = wm_gmii_bm_writereg;
   11330 
   11331 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11332 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11333 	}
   11334 
   11335 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   11337 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11338 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11339 		sc->sc_phytype = WMPHY_NONE;
   11340 	} else {
   11341 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11342 
		/*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype().
		 */
   11347 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11348 		    child->mii_mpd_model);
   11349 
   11350 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11351 	}
   11352 }
   11353 
   11354 /*
   11355  * wm_gmii_mediachange:	[ifmedia interface function]
   11356  *
   11357  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11358  */
   11359 static int
   11360 wm_gmii_mediachange(struct ifnet *ifp)
   11361 {
   11362 	struct wm_softc *sc = ifp->if_softc;
   11363 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11364 	uint32_t reg;
   11365 	int rc;
   11366 
   11367 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11368 		device_xname(sc->sc_dev), __func__));
   11369 
   11370 	KASSERT(mutex_owned(sc->sc_core_lock));
   11371 
   11372 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11373 		return 0;
   11374 
   11375 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11376 	if ((sc->sc_type == WM_T_82580)
   11377 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11378 	    || (sc->sc_type == WM_T_I211)) {
   11379 		reg = CSR_READ(sc, WMREG_PHPM);
   11380 		reg &= ~PHPM_GO_LINK_D;
   11381 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11382 	}
   11383 
   11384 	/* Disable D0 LPLU. */
   11385 	wm_lplu_d0_disable(sc);
   11386 
   11387 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11388 	sc->sc_ctrl |= CTRL_SLU;
   11389 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11390 	    || (sc->sc_type > WM_T_82543)) {
   11391 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11392 	} else {
   11393 		sc->sc_ctrl &= ~CTRL_ASDE;
   11394 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11395 		if (ife->ifm_media & IFM_FDX)
   11396 			sc->sc_ctrl |= CTRL_FD;
   11397 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11398 		case IFM_10_T:
   11399 			sc->sc_ctrl |= CTRL_SPEED_10;
   11400 			break;
   11401 		case IFM_100_TX:
   11402 			sc->sc_ctrl |= CTRL_SPEED_100;
   11403 			break;
   11404 		case IFM_1000_T:
   11405 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11406 			break;
   11407 		case IFM_NONE:
   11408 			/* There is no specific setting for IFM_NONE */
   11409 			break;
   11410 		default:
   11411 			panic("wm_gmii_mediachange: bad media 0x%x",
   11412 			    ife->ifm_media);
   11413 		}
   11414 	}
   11415 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11416 	CSR_WRITE_FLUSH(sc);
   11417 
   11418 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11419 		wm_serdes_mediachange(ifp);
   11420 
   11421 	if (sc->sc_type <= WM_T_82543)
   11422 		wm_gmii_reset(sc);
   11423 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11424 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   11426 		delay(300 * 1000);
   11427 		wm_gmii_reset(sc);
   11428 	}
   11429 
   11430 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11431 		return 0;
   11432 	return rc;
   11433 }
   11434 
   11435 /*
   11436  * wm_gmii_mediastatus:	[ifmedia interface function]
   11437  *
   11438  *	Get the current interface media status on a 1000BASE-T device.
   11439  */
   11440 static void
   11441 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11442 {
   11443 	struct wm_softc *sc = ifp->if_softc;
   11444 
   11445 	KASSERT(mutex_owned(sc->sc_core_lock));
   11446 
   11447 	ether_mediastatus(ifp, ifmr);
   11448 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11449 	    | sc->sc_flowflags;
   11450 }
   11451 
   11452 #define	MDI_IO		CTRL_SWDPIN(2)
   11453 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11454 #define	MDI_CLK		CTRL_SWDPIN(3)
   11455 
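/*
 * The i82543 has no MDIC register; MDIO frames are bit-banged through
 * software-controlled pins in CTRL: MDI_IO carries the data, MDI_CLK is
 * pulsed low-high-low for each bit with 10us settle times, and MDI_DIR
 * selects the direction (host -> PHY for output).
 * wm_i82543_mii_sendbits() shifts out the 32-bit preamble and the command
 * frame MSB first; wm_i82543_mii_recvbits() clocks the turnaround and
 * then shifts in the 16 data bits.
 */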
   11456 static void
   11457 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11458 {
   11459 	uint32_t i, v;
   11460 
   11461 	v = CSR_READ(sc, WMREG_CTRL);
   11462 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11463 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11464 
   11465 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11466 		if (data & i)
   11467 			v |= MDI_IO;
   11468 		else
   11469 			v &= ~MDI_IO;
   11470 		CSR_WRITE(sc, WMREG_CTRL, v);
   11471 		CSR_WRITE_FLUSH(sc);
   11472 		delay(10);
   11473 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11474 		CSR_WRITE_FLUSH(sc);
   11475 		delay(10);
   11476 		CSR_WRITE(sc, WMREG_CTRL, v);
   11477 		CSR_WRITE_FLUSH(sc);
   11478 		delay(10);
   11479 	}
   11480 }
   11481 
   11482 static uint16_t
   11483 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11484 {
   11485 	uint32_t v, i;
   11486 	uint16_t data = 0;
   11487 
   11488 	v = CSR_READ(sc, WMREG_CTRL);
   11489 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11490 	v |= CTRL_SWDPIO(3);
   11491 
   11492 	CSR_WRITE(sc, WMREG_CTRL, v);
   11493 	CSR_WRITE_FLUSH(sc);
   11494 	delay(10);
   11495 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11496 	CSR_WRITE_FLUSH(sc);
   11497 	delay(10);
   11498 	CSR_WRITE(sc, WMREG_CTRL, v);
   11499 	CSR_WRITE_FLUSH(sc);
   11500 	delay(10);
   11501 
   11502 	for (i = 0; i < 16; i++) {
   11503 		data <<= 1;
   11504 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11505 		CSR_WRITE_FLUSH(sc);
   11506 		delay(10);
   11507 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11508 			data |= 1;
   11509 		CSR_WRITE(sc, WMREG_CTRL, v);
   11510 		CSR_WRITE_FLUSH(sc);
   11511 		delay(10);
   11512 	}
   11513 
   11514 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11515 	CSR_WRITE_FLUSH(sc);
   11516 	delay(10);
   11517 	CSR_WRITE(sc, WMREG_CTRL, v);
   11518 	CSR_WRITE_FLUSH(sc);
   11519 	delay(10);
   11520 
   11521 	return data;
   11522 }
   11523 
   11524 #undef MDI_IO
   11525 #undef MDI_DIR
   11526 #undef MDI_CLK
   11527 
   11528 /*
   11529  * wm_gmii_i82543_readreg:	[mii interface function]
   11530  *
   11531  *	Read a PHY register on the GMII (i82543 version).
   11532  */
   11533 static int
   11534 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11535 {
   11536 	struct wm_softc *sc = device_private(dev);
   11537 
   11538 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11539 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11540 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11541 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11542 
   11543 	DPRINTF(sc, WM_DEBUG_GMII,
   11544 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11545 		device_xname(dev), phy, reg, *val));
   11546 
   11547 	return 0;
   11548 }
   11549 
   11550 /*
   11551  * wm_gmii_i82543_writereg:	[mii interface function]
   11552  *
   11553  *	Write a PHY register on the GMII (i82543 version).
   11554  */
   11555 static int
   11556 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11557 {
   11558 	struct wm_softc *sc = device_private(dev);
   11559 
   11560 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11561 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11562 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11563 	    (MII_COMMAND_START << 30), 32);
   11564 
   11565 	return 0;
   11566 }
   11567 
   11568 /*
   11569  * wm_gmii_mdic_readreg:	[mii interface function]
   11570  *
   11571  *	Read a PHY register on the GMII.
   11572  */
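/*
 * A whole IEEE 802.3 Clause 22 MDIO frame is carried by the single MDIC
 * CSR: the opcode, PHY address, register number and 16-bit data are
 * packed into one write, and completion is signalled by MDIC_READY,
 * which is polled below every 50us for up to WM_GEN_POLL_TIMEOUT * 3
 * iterations.  MDIC_E reports an error, e.g. when no PHY responds.
 */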
   11573 static int
   11574 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11575 {
   11576 	struct wm_softc *sc = device_private(dev);
   11577 	uint32_t mdic = 0;
   11578 	int i;
   11579 
   11580 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11581 	    && (reg > MII_ADDRMASK)) {
   11582 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11583 		    __func__, sc->sc_phytype, reg);
   11584 		reg &= MII_ADDRMASK;
   11585 	}
   11586 
   11587 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11588 	    MDIC_REGADD(reg));
   11589 
   11590 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11591 		delay(50);
   11592 		mdic = CSR_READ(sc, WMREG_MDIC);
   11593 		if (mdic & MDIC_READY)
   11594 			break;
   11595 	}
   11596 
   11597 	if ((mdic & MDIC_READY) == 0) {
   11598 		DPRINTF(sc, WM_DEBUG_GMII,
   11599 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11600 			device_xname(dev), phy, reg));
   11601 		return ETIMEDOUT;
   11602 	} else if (mdic & MDIC_E) {
   11603 		/* This is normal if no PHY is present. */
   11604 		DPRINTF(sc, WM_DEBUG_GMII,
   11605 		    ("%s: MDIC read error: phy %d reg %d\n",
   11606 			device_xname(sc->sc_dev), phy, reg));
   11607 		return -1;
   11608 	} else
   11609 		*val = MDIC_DATA(mdic);
   11610 
   11611 	/*
   11612 	 * Allow some time after each MDIC transaction to avoid
   11613 	 * reading duplicate data in the next MDIC transaction.
   11614 	 */
   11615 	if (sc->sc_type == WM_T_PCH2)
   11616 		delay(100);
   11617 
   11618 	return 0;
   11619 }
   11620 
   11621 /*
   11622  * wm_gmii_mdic_writereg:	[mii interface function]
   11623  *
   11624  *	Write a PHY register on the GMII.
   11625  */
   11626 static int
   11627 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11628 {
   11629 	struct wm_softc *sc = device_private(dev);
   11630 	uint32_t mdic = 0;
   11631 	int i;
   11632 
   11633 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11634 	    && (reg > MII_ADDRMASK)) {
   11635 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11636 		    __func__, sc->sc_phytype, reg);
   11637 		reg &= MII_ADDRMASK;
   11638 	}
   11639 
   11640 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11641 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11642 
   11643 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11644 		delay(50);
   11645 		mdic = CSR_READ(sc, WMREG_MDIC);
   11646 		if (mdic & MDIC_READY)
   11647 			break;
   11648 	}
   11649 
   11650 	if ((mdic & MDIC_READY) == 0) {
   11651 		DPRINTF(sc, WM_DEBUG_GMII,
   11652 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11653 			device_xname(dev), phy, reg));
   11654 		return ETIMEDOUT;
   11655 	} else if (mdic & MDIC_E) {
   11656 		DPRINTF(sc, WM_DEBUG_GMII,
   11657 		    ("%s: MDIC write error: phy %d reg %d\n",
   11658 			device_xname(dev), phy, reg));
   11659 		return -1;
   11660 	}
   11661 
   11662 	/*
   11663 	 * Allow some time after each MDIC transaction to avoid
   11664 	 * reading duplicate data in the next MDIC transaction.
   11665 	 */
   11666 	if (sc->sc_type == WM_T_PCH2)
   11667 		delay(100);
   11668 
   11669 	return 0;
   11670 }
   11671 
   11672 /*
   11673  * wm_gmii_i82544_readreg:	[mii interface function]
   11674  *
   11675  *	Read a PHY register on the GMII.
   11676  */
   11677 static int
   11678 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11679 {
   11680 	struct wm_softc *sc = device_private(dev);
   11681 	int rv;
   11682 
   11683 	rv = sc->phy.acquire(sc);
   11684 	if (rv != 0) {
   11685 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11686 		return rv;
   11687 	}
   11688 
   11689 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11690 
   11691 	sc->phy.release(sc);
   11692 
   11693 	return rv;
   11694 }
   11695 
   11696 static int
   11697 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11698 {
   11699 	struct wm_softc *sc = device_private(dev);
   11700 	int rv;
   11701 
   11702 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11703 		switch (sc->sc_phytype) {
   11704 		case WMPHY_IGP:
   11705 		case WMPHY_IGP_2:
   11706 		case WMPHY_IGP_3:
   11707 			rv = wm_gmii_mdic_writereg(dev, phy,
   11708 			    IGPHY_PAGE_SELECT, reg);
   11709 			if (rv != 0)
   11710 				return rv;
   11711 			break;
   11712 		default:
   11713 #ifdef WM_DEBUG
   11714 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11715 			    __func__, sc->sc_phytype, reg);
   11716 #endif
   11717 			break;
   11718 		}
   11719 	}
   11720 
   11721 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11722 }
   11723 
   11724 /*
   11725  * wm_gmii_i82544_writereg:	[mii interface function]
   11726  *
   11727  *	Write a PHY register on the GMII.
   11728  */
   11729 static int
   11730 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11731 {
   11732 	struct wm_softc *sc = device_private(dev);
   11733 	int rv;
   11734 
   11735 	rv = sc->phy.acquire(sc);
   11736 	if (rv != 0) {
   11737 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11738 		return rv;
   11739 	}
   11740 
   11741 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11742 	sc->phy.release(sc);
   11743 
   11744 	return rv;
   11745 }
   11746 
   11747 static int
   11748 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11749 {
   11750 	struct wm_softc *sc = device_private(dev);
   11751 	int rv;
   11752 
   11753 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11754 		switch (sc->sc_phytype) {
   11755 		case WMPHY_IGP:
   11756 		case WMPHY_IGP_2:
   11757 		case WMPHY_IGP_3:
   11758 			rv = wm_gmii_mdic_writereg(dev, phy,
   11759 			    IGPHY_PAGE_SELECT, reg);
   11760 			if (rv != 0)
   11761 				return rv;
   11762 			break;
   11763 		default:
   11764 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11766 			    __func__, sc->sc_phytype, reg);
   11767 #endif
   11768 			break;
   11769 		}
   11770 	}
   11771 
   11772 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11773 }
   11774 
   11775 /*
   11776  * wm_gmii_i80003_readreg:	[mii interface function]
   11777  *
 *	Read a PHY register on the Kumeran bus (80003).
   11779  * This could be handled by the PHY layer if we didn't have to lock the
   11780  * resource ...
   11781  */
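/*
 * The GG82563 exposes paged registers: the page (reg >>
 * GG82563_PAGE_SHIFT) is first written to GG82563_PHY_PAGE_SELECT, or to
 * GG82563_PHY_PAGE_SELECT_ALT for registers 30 and 31.  When the
 * WM_F_80003_MDIC_WA flag is set, the page-select write is read back and
 * verified, with 200us settle delays, to work around an MDIC ready-bit
 * bug.
 */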
   11782 static int
   11783 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11784 {
   11785 	struct wm_softc *sc = device_private(dev);
   11786 	int page_select;
   11787 	uint16_t temp, temp2;
   11788 	int rv;
   11789 
   11790 	if (phy != 1) /* Only one PHY on kumeran bus */
   11791 		return -1;
   11792 
   11793 	rv = sc->phy.acquire(sc);
   11794 	if (rv != 0) {
   11795 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11796 		return rv;
   11797 	}
   11798 
   11799 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11800 		page_select = GG82563_PHY_PAGE_SELECT;
   11801 	else {
   11802 		/*
   11803 		 * Use Alternative Page Select register to access registers
   11804 		 * 30 and 31.
   11805 		 */
   11806 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11807 	}
   11808 	temp = reg >> GG82563_PAGE_SHIFT;
   11809 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11810 		goto out;
   11811 
   11812 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an additional 200us to work around a bug in the
		 * MDIC register's ready bit.
		 */
   11817 		delay(200);
   11818 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11819 		if ((rv != 0) || (temp2 != temp)) {
   11820 			device_printf(dev, "%s failed\n", __func__);
   11821 			rv = -1;
   11822 			goto out;
   11823 		}
   11824 		delay(200);
   11825 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11826 		delay(200);
   11827 	} else
   11828 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11829 
   11830 out:
   11831 	sc->phy.release(sc);
   11832 	return rv;
   11833 }
   11834 
   11835 /*
   11836  * wm_gmii_i80003_writereg:	[mii interface function]
   11837  *
 *	Write a PHY register on the Kumeran bus (80003).
   11839  * This could be handled by the PHY layer if we didn't have to lock the
   11840  * resource ...
   11841  */
   11842 static int
   11843 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11844 {
   11845 	struct wm_softc *sc = device_private(dev);
   11846 	int page_select, rv;
   11847 	uint16_t temp, temp2;
   11848 
   11849 	if (phy != 1) /* Only one PHY on kumeran bus */
   11850 		return -1;
   11851 
   11852 	rv = sc->phy.acquire(sc);
   11853 	if (rv != 0) {
   11854 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11855 		return rv;
   11856 	}
   11857 
   11858 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11859 		page_select = GG82563_PHY_PAGE_SELECT;
   11860 	else {
   11861 		/*
   11862 		 * Use Alternative Page Select register to access registers
   11863 		 * 30 and 31.
   11864 		 */
   11865 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11866 	}
   11867 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11868 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11869 		goto out;
   11870 
   11871 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an additional 200us to work around a bug in the
		 * MDIC register's ready bit.
		 */
   11876 		delay(200);
   11877 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11878 		if ((rv != 0) || (temp2 != temp)) {
   11879 			device_printf(dev, "%s failed\n", __func__);
   11880 			rv = -1;
   11881 			goto out;
   11882 		}
   11883 		delay(200);
   11884 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11885 		delay(200);
   11886 	} else
   11887 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11888 
   11889 out:
   11890 	sc->phy.release(sc);
   11891 	return rv;
   11892 }
   11893 
   11894 /*
   11895  * wm_gmii_bm_readreg:	[mii interface function]
   11896  *
 *	Read a PHY register on the BM PHY (ICH8/9/10, 82574 and 82583).
   11898  * This could be handled by the PHY layer if we didn't have to lock the
   11899  * resource ...
   11900  */
   11901 static int
   11902 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11903 {
   11904 	struct wm_softc *sc = device_private(dev);
   11905 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11906 	int rv;
   11907 
   11908 	rv = sc->phy.acquire(sc);
   11909 	if (rv != 0) {
   11910 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11911 		return rv;
   11912 	}
   11913 
   11914 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11915 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11916 		    || (reg == 31)) ? 1 : phy;
   11917 	/* Page 800 works differently than the rest so it has its own func */
   11918 	if (page == BM_WUC_PAGE) {
   11919 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11920 		goto release;
   11921 	}
   11922 
   11923 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11924 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11925 		    && (sc->sc_type != WM_T_82583))
   11926 			rv = wm_gmii_mdic_writereg(dev, phy,
   11927 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11928 		else
   11929 			rv = wm_gmii_mdic_writereg(dev, phy,
   11930 			    BME1000_PHY_PAGE_SELECT, page);
   11931 		if (rv != 0)
   11932 			goto release;
   11933 	}
   11934 
   11935 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11936 
   11937 release:
   11938 	sc->phy.release(sc);
   11939 	return rv;
   11940 }
   11941 
   11942 /*
   11943  * wm_gmii_bm_writereg:	[mii interface function]
   11944  *
 *	Write a PHY register on the BM PHY (ICH8/9/10, 82574 and 82583).
   11946  * This could be handled by the PHY layer if we didn't have to lock the
   11947  * resource ...
   11948  */
   11949 static int
   11950 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11951 {
   11952 	struct wm_softc *sc = device_private(dev);
   11953 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11954 	int rv;
   11955 
   11956 	rv = sc->phy.acquire(sc);
   11957 	if (rv != 0) {
   11958 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11959 		return rv;
   11960 	}
   11961 
   11962 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11963 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11964 		    || (reg == 31)) ? 1 : phy;
   11965 	/* Page 800 works differently than the rest so it has its own func */
   11966 	if (page == BM_WUC_PAGE) {
   11967 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11968 		goto release;
   11969 	}
   11970 
   11971 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11972 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11973 		    && (sc->sc_type != WM_T_82583))
   11974 			rv = wm_gmii_mdic_writereg(dev, phy,
   11975 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11976 		else
   11977 			rv = wm_gmii_mdic_writereg(dev, phy,
   11978 			    BME1000_PHY_PAGE_SELECT, page);
   11979 		if (rv != 0)
   11980 			goto release;
   11981 	}
   11982 
   11983 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11984 
   11985 release:
   11986 	sc->phy.release(sc);
   11987 	return rv;
   11988 }
   11989 
   11990 /*
   11991  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11992  *  @dev: pointer to the HW structure
   11993  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11994  *
   11995  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11996  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11997  */
   11998 static int
   11999 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12000 {
   12001 #ifdef WM_DEBUG
   12002 	struct wm_softc *sc = device_private(dev);
   12003 #endif
   12004 	uint16_t temp;
   12005 	int rv;
   12006 
   12007 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12008 		device_xname(dev), __func__));
   12009 
   12010 	if (!phy_regp)
   12011 		return -1;
   12012 
   12013 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12014 
   12015 	/* Select Port Control Registers page */
   12016 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12017 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12018 	if (rv != 0)
   12019 		return rv;
   12020 
   12021 	/* Read WUCE and save it */
   12022 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12023 	if (rv != 0)
   12024 		return rv;
   12025 
	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
   12029 	temp = *phy_regp;
   12030 	temp |= BM_WUC_ENABLE_BIT;
   12031 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12032 
   12033 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12034 		return rv;
   12035 
	/*
	 * Select the Host Wakeup Registers page - the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
   12039 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12040 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12041 }
   12042 
   12043 /*
   12044  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12045  *  @dev: pointer to the HW structure
   12046  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12047  *
   12048  *  Restore BM_WUC_ENABLE_REG to its original value.
   12049  *
   12050  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12051  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12052  *  caller.
   12053  */
   12054 static int
   12055 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12056 {
   12057 #ifdef WM_DEBUG
   12058 	struct wm_softc *sc = device_private(dev);
   12059 #endif
   12060 
   12061 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12062 		device_xname(dev), __func__));
   12063 
   12064 	if (!phy_regp)
   12065 		return -1;
   12066 
   12067 	/* Select Port Control Registers page */
   12068 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12069 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12070 
   12071 	/* Restore 769.17 to its original value */
   12072 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12073 
   12074 	return 0;
   12075 }
   12076 
/*
 *  wm_access_phy_wakeup_reg_bm - Read/write a BM PHY wakeup register
 *  @dev: pointer to the device structure
 *  @offset: register offset to be read or written
 *  @val: pointer to the data to read or write
 *  @rd: determines if operation is read or write
 *  @page_set: BM_WUC_PAGE already set and access enabled
 *
 *  Read the PHY register at offset and store the retrieved information
 *  in val, or write val to the PHY register at offset.  Note that the
 *  procedure to access the PHY wakeup registers differs from that for
 *  the other PHY registers.  It works as follows:
 *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability)
 *  3) Write the address using the address opcode (0x11)
 *  4) Read or write the data using the data opcode (0x12)
 *  5) Restore 769.17.2 to its original value
 *
 *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
 *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
 *
 *  Assumes the semaphore is already acquired.  When page_set==TRUE,
 *  assumes the PHY page is set to BM_WUC_PAGE (i.e. a function in the
 *  call stack is responsible for calls to
 *  wm_{enable,disable}_phy_wakeup_reg_access_bm()).
 */
   12102 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12104     bool page_set)
   12105 {
   12106 	struct wm_softc *sc = device_private(dev);
   12107 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12108 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12109 	uint16_t wuce;
   12110 	int rv = 0;
   12111 
   12112 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12113 		device_xname(dev), __func__));
   12114 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12115 	if ((sc->sc_type == WM_T_PCH)
   12116 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12117 		device_printf(dev,
   12118 		    "Attempting to access page %d while gig enabled.\n", page);
   12119 	}
   12120 
   12121 	if (!page_set) {
   12122 		/* Enable access to PHY wakeup registers */
   12123 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12124 		if (rv != 0) {
   12125 			device_printf(dev,
   12126 			    "%s: Could not enable PHY wakeup reg access\n",
   12127 			    __func__);
   12128 			return rv;
   12129 		}
   12130 	}
   12131 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12132 		device_xname(sc->sc_dev), __func__, page, regnum));
   12133 
	/*
	 * Access the PHY wakeup register: steps 3) and 4) of the
	 * procedure described above.
	 */
   12138 
   12139 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12140 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12141 	if (rv != 0)
   12142 		return rv;
   12143 
   12144 	if (rd) {
   12145 		/* Read the Wakeup register page value using opcode 0x12 */
   12146 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12147 	} else {
   12148 		/* Write the Wakeup register page value using opcode 0x12 */
   12149 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12150 	}
   12151 	if (rv != 0)
   12152 		return rv;
   12153 
   12154 	if (!page_set)
   12155 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12156 
   12157 	return rv;
   12158 }
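
/*
 * Usage sketch (illustrative only; mirrors how the BM and HV read/write
 * paths above invoke this helper):
 */
#if 0
	/* Illustrative only: assumes dev, reg and rv in scope. */
	uint16_t data;

	/* Read: steps 1-5 are handled internally when page_set == false. */
	rv = wm_access_phy_wakeup_reg_bm(dev, reg, &data, true, false);

	/* Write: the same entry point with rd == false. */
	rv = wm_access_phy_wakeup_reg_bm(dev, reg, &data, false, false);
#endif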
   12159 
   12160 /*
   12161  * wm_gmii_hv_readreg:	[mii interface function]
   12162  *
 *	Read a PHY register on the HV PHY (PCH and newer).
   12164  * This could be handled by the PHY layer if we didn't have to lock the
   12165  * resource ...
   12166  */
   12167 static int
   12168 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12169 {
   12170 	struct wm_softc *sc = device_private(dev);
   12171 	int rv;
   12172 
   12173 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12174 		device_xname(dev), __func__));
   12175 
   12176 	rv = sc->phy.acquire(sc);
   12177 	if (rv != 0) {
   12178 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12179 		return rv;
   12180 	}
   12181 
   12182 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12183 	sc->phy.release(sc);
   12184 	return rv;
   12185 }
   12186 
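/*
 * In the HV access path the register argument encodes both the page and
 * the register number: BM_PHY_REG_PAGE() extracts the page and
 * BM_PHY_REG_NUM() the register.  Pages at or above
 * HV_INTC_FC_PAGE_START live at PHY address 1, and page 800
 * (BM_WUC_PAGE) is routed to the wakeup-register helper above.
 */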
   12187 static int
   12188 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12189 {
   12190 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12191 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12192 	int rv;
   12193 
   12194 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12195 
   12196 	/* Page 800 works differently than the rest so it has its own func */
   12197 	if (page == BM_WUC_PAGE)
   12198 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12199 
	/*
	 * Pages lower than 768 work differently from the rest, so they
	 * have their own function.
	 */
   12204 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12205 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12206 		return -1;
   12207 	}
   12208 
   12209 	/*
   12210 	 * XXX I21[789] documents say that the SMBus Address register is at
   12211 	 * PHY address 01, Page 0 (not 768), Register 26.
   12212 	 */
   12213 	if (page == HV_INTC_FC_PAGE_START)
   12214 		page = 0;
   12215 
   12216 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12217 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12218 		    page << BME1000_PAGE_SHIFT);
   12219 		if (rv != 0)
   12220 			return rv;
   12221 	}
   12222 
   12223 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12224 }
   12225 
   12226 /*
   12227  * wm_gmii_hv_writereg:	[mii interface function]
   12228  *
 *	Write a PHY register on the HV PHY (PCH and newer).
   12230  * This could be handled by the PHY layer if we didn't have to lock the
   12231  * resource ...
   12232  */
   12233 static int
   12234 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12235 {
   12236 	struct wm_softc *sc = device_private(dev);
   12237 	int rv;
   12238 
   12239 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12240 		device_xname(dev), __func__));
   12241 
   12242 	rv = sc->phy.acquire(sc);
   12243 	if (rv != 0) {
   12244 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12245 		return rv;
   12246 	}
   12247 
   12248 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12249 	sc->phy.release(sc);
   12250 
   12251 	return rv;
   12252 }
   12253 
   12254 static int
   12255 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12256 {
   12257 	struct wm_softc *sc = device_private(dev);
   12258 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12259 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12260 	int rv;
   12261 
   12262 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12263 
   12264 	/* Page 800 works differently than the rest so it has its own func */
   12265 	if (page == BM_WUC_PAGE)
   12266 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12267 		    false);
   12268 
	/*
	 * Pages lower than 768 work differently from the rest, so they
	 * have their own function.
	 */
   12273 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12274 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12275 		return -1;
   12276 	}
   12277 
   12278 	{
   12279 		/*
   12280 		 * XXX I21[789] documents say that the SMBus Address register
   12281 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12282 		 */
   12283 		if (page == HV_INTC_FC_PAGE_START)
   12284 			page = 0;
   12285 
   12286 		/*
   12287 		 * XXX Workaround MDIO accesses being disabled after entering
   12288 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12289 		 * register is set)
   12290 		 */
   12291 		if (sc->sc_phytype == WMPHY_82578) {
   12292 			struct mii_softc *child;
   12293 
   12294 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12295 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12296 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12297 			    && ((val & (1 << 11)) != 0)) {
   12298 				device_printf(dev, "XXX need workaround\n");
   12299 			}
   12300 		}
   12301 
   12302 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12303 			rv = wm_gmii_mdic_writereg(dev, 1,
   12304 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12305 			if (rv != 0)
   12306 				return rv;
   12307 		}
   12308 	}
   12309 
   12310 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12311 }
   12312 
   12313 /*
   12314  * wm_gmii_82580_readreg:	[mii interface function]
   12315  *
   12316  *	Read a PHY register on the 82580 and I350.
   12317  * This could be handled by the PHY layer if we didn't have to lock the
   12318  * resource ...
   12319  */
   12320 static int
   12321 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12322 {
   12323 	struct wm_softc *sc = device_private(dev);
   12324 	int rv;
   12325 
   12326 	rv = sc->phy.acquire(sc);
   12327 	if (rv != 0) {
   12328 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12329 		return rv;
   12330 	}
   12331 
   12332 #ifdef DIAGNOSTIC
   12333 	if (reg > MII_ADDRMASK) {
   12334 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12335 		    __func__, sc->sc_phytype, reg);
   12336 		reg &= MII_ADDRMASK;
   12337 	}
   12338 #endif
   12339 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12340 
   12341 	sc->phy.release(sc);
   12342 	return rv;
   12343 }
   12344 
   12345 /*
   12346  * wm_gmii_82580_writereg:	[mii interface function]
   12347  *
   12348  *	Write a PHY register on the 82580 and I350.
   12349  * This could be handled by the PHY layer if we didn't have to lock the
   12350  * resource ...
   12351  */
   12352 static int
   12353 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12354 {
   12355 	struct wm_softc *sc = device_private(dev);
   12356 	int rv;
   12357 
   12358 	rv = sc->phy.acquire(sc);
   12359 	if (rv != 0) {
   12360 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12361 		return rv;
   12362 	}
   12363 
   12364 #ifdef DIAGNOSTIC
   12365 	if (reg > MII_ADDRMASK) {
   12366 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12367 		    __func__, sc->sc_phytype, reg);
   12368 		reg &= MII_ADDRMASK;
   12369 	}
   12370 #endif
   12371 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12372 
   12373 	sc->phy.release(sc);
   12374 	return rv;
   12375 }
   12376 
   12377 /*
   12378  * wm_gmii_gs40g_readreg:	[mii interface function]
   12379  *
 *	Read a PHY register on the I210 and I211.
   12381  * This could be handled by the PHY layer if we didn't have to lock the
   12382  * resource ...
   12383  */
   12384 static int
   12385 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12386 {
   12387 	struct wm_softc *sc = device_private(dev);
   12388 	int page, offset;
   12389 	int rv;
   12390 
   12391 	/* Acquire semaphore */
   12392 	rv = sc->phy.acquire(sc);
   12393 	if (rv != 0) {
   12394 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12395 		return rv;
   12396 	}
   12397 
   12398 	/* Page select */
   12399 	page = reg >> GS40G_PAGE_SHIFT;
   12400 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12401 	if (rv != 0)
   12402 		goto release;
   12403 
   12404 	/* Read reg */
   12405 	offset = reg & GS40G_OFFSET_MASK;
   12406 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12407 
   12408 release:
   12409 	sc->phy.release(sc);
   12410 	return rv;
   12411 }
   12412 
   12413 /*
   12414  * wm_gmii_gs40g_writereg:	[mii interface function]
   12415  *
   12416  *	Write a PHY register on the I210 and I211.
   12417  * This could be handled by the PHY layer if we didn't have to lock the
   12418  * resource ...
   12419  */
   12420 static int
   12421 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12422 {
   12423 	struct wm_softc *sc = device_private(dev);
   12424 	uint16_t page;
   12425 	int offset, rv;
   12426 
   12427 	/* Acquire semaphore */
   12428 	rv = sc->phy.acquire(sc);
   12429 	if (rv != 0) {
   12430 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12431 		return rv;
   12432 	}
   12433 
   12434 	/* Page select */
   12435 	page = reg >> GS40G_PAGE_SHIFT;
   12436 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12437 	if (rv != 0)
   12438 		goto release;
   12439 
   12440 	/* Write reg */
   12441 	offset = reg & GS40G_OFFSET_MASK;
   12442 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12443 
   12444 release:
   12445 	/* Release semaphore */
   12446 	sc->phy.release(sc);
   12447 	return rv;
   12448 }
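
/*
 * In the GS40G (I210/I211 internal PHY) access path the register
 * argument packs the page into the high bits: page = reg >>
 * GS40G_PAGE_SHIFT is written to GS40G_PAGE_SELECT first, then the low
 * bits (reg & GS40G_OFFSET_MASK) select the register on that page.
 */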
   12449 
   12450 /*
   12451  * wm_gmii_statchg:	[mii interface function]
   12452  *
   12453  *	Callback from MII layer when media changes.
   12454  */
   12455 static void
   12456 wm_gmii_statchg(struct ifnet *ifp)
   12457 {
   12458 	struct wm_softc *sc = ifp->if_softc;
   12459 	struct mii_data *mii = &sc->sc_mii;
   12460 
   12461 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12462 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12463 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12464 
   12465 	/* Get flow control negotiation result. */
   12466 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12467 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12468 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12469 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12470 	}
   12471 
   12472 	if (sc->sc_flowflags & IFM_FLOW) {
   12473 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12474 			sc->sc_ctrl |= CTRL_TFCE;
   12475 			sc->sc_fcrtl |= FCRTL_XONE;
   12476 		}
   12477 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12478 			sc->sc_ctrl |= CTRL_RFCE;
   12479 	}
   12480 
   12481 	if (mii->mii_media_active & IFM_FDX) {
   12482 		DPRINTF(sc, WM_DEBUG_LINK,
   12483 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12484 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12485 	} else {
   12486 		DPRINTF(sc, WM_DEBUG_LINK,
   12487 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12488 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12489 	}
   12490 
   12491 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12492 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12493 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12494 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12495 	if (sc->sc_type == WM_T_80003) {
   12496 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12497 		case IFM_1000_T:
   12498 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12499 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12501 			break;
   12502 		default:
   12503 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12504 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12505 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12506 			break;
   12507 		}
   12508 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12509 	}
   12510 }
   12511 
   12512 /* kumeran related (80003, ICH* and PCH*) */
   12513 
   12514 /*
   12515  * wm_kmrn_readreg:
   12516  *
   12517  *	Read a kumeran register
   12518  */
   12519 static int
   12520 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12521 {
   12522 	int rv;
   12523 
   12524 	if (sc->sc_type == WM_T_80003)
   12525 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12526 	else
   12527 		rv = sc->phy.acquire(sc);
   12528 	if (rv != 0) {
   12529 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12530 		    __func__);
   12531 		return rv;
   12532 	}
   12533 
   12534 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12535 
   12536 	if (sc->sc_type == WM_T_80003)
   12537 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12538 	else
   12539 		sc->phy.release(sc);
   12540 
   12541 	return rv;
   12542 }
   12543 
   12544 static int
   12545 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12546 {
   12547 
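	/*
	 * Latch the register offset with the read-enable (REN) bit set,
	 * wait briefly, then pull the data back out of the same register.
	 */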
   12548 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12549 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12550 	    KUMCTRLSTA_REN);
   12551 	CSR_WRITE_FLUSH(sc);
   12552 	delay(2);
   12553 
   12554 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12555 
   12556 	return 0;
   12557 }
   12558 
   12559 /*
   12560  * wm_kmrn_writereg:
   12561  *
   12562  *	Write a kumeran register
   12563  */
   12564 static int
   12565 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12566 {
   12567 	int rv;
   12568 
   12569 	if (sc->sc_type == WM_T_80003)
   12570 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12571 	else
   12572 		rv = sc->phy.acquire(sc);
   12573 	if (rv != 0) {
   12574 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12575 		    __func__);
   12576 		return rv;
   12577 	}
   12578 
   12579 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12580 
   12581 	if (sc->sc_type == WM_T_80003)
   12582 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12583 	else
   12584 		sc->phy.release(sc);
   12585 
   12586 	return rv;
   12587 }
   12588 
   12589 static int
   12590 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12591 {
   12592 
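	/* A write is one shot: offset and data together, with REN clear. */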
   12593 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12594 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12595 
   12596 	return 0;
   12597 }
   12598 
   12599 /*
   12600  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12601  * This access method is different from IEEE MMD.
   12602  */
   12603 static int
   12604 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12605 {
   12606 	struct wm_softc *sc = device_private(dev);
   12607 	int rv;
   12608 
   12609 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12610 	if (rv != 0)
   12611 		return rv;
   12612 
   12613 	if (rd)
   12614 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12615 	else
   12616 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12617 	return rv;
   12618 }
   12619 
   12620 static int
   12621 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12622 {
   12623 
   12624 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12625 }
   12626 
   12627 static int
   12628 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12629 {
   12630 
   12631 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12632 }
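
/*
 * Minimal usage sketch (illustrative only, not compiled): EMI registers
 * are reached indirectly via I82579_EMI_ADDR/_DATA, so a caller holds
 * the PHY semaphore across both phases.  "MY_EMI_REG" and the locals
 * are placeholders, not real names:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = wm_read_emi_reg_locked(dev, MY_EMI_REG, &val);
 *		sc->phy.release(sc);
 *	}
 */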
   12633 
   12634 /* SGMII related */
   12635 
   12636 /*
   12637  * wm_sgmii_uses_mdio
   12638  *
   12639  * Check whether the transaction is to the internal PHY or the external
   12640  * MDIO interface. Return true if it's MDIO.
   12641  */
   12642 static bool
   12643 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12644 {
   12645 	uint32_t reg;
   12646 	bool ismdio = false;
   12647 
   12648 	switch (sc->sc_type) {
   12649 	case WM_T_82575:
   12650 	case WM_T_82576:
   12651 		reg = CSR_READ(sc, WMREG_MDIC);
   12652 		ismdio = ((reg & MDIC_DEST) != 0);
   12653 		break;
   12654 	case WM_T_82580:
   12655 	case WM_T_I350:
   12656 	case WM_T_I354:
   12657 	case WM_T_I210:
   12658 	case WM_T_I211:
   12659 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12660 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12661 		break;
   12662 	default:
   12663 		break;
   12664 	}
   12665 
   12666 	return ismdio;
   12667 }
   12668 
   12669 /* Setup internal SGMII PHY for SFP */
   12670 static void
   12671 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12672 {
   12673 	uint16_t id1, id2, phyreg;
   12674 	int i, rv;
   12675 
   12676 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12677 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12678 		return;
   12679 
   12680 	for (i = 0; i < MII_NPHY; i++) {
   12681 		sc->phy.no_errprint = true;
   12682 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12683 		if (rv != 0)
   12684 			continue;
   12685 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12686 		if (rv != 0)
   12687 			continue;
   12688 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12689 			continue;
   12690 		sc->phy.no_errprint = false;
   12691 
   12692 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12693 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12694 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12695 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12696 		break;
   12697 	}
   12698 
   12699 }
   12700 
   12701 /*
   12702  * wm_sgmii_readreg:	[mii interface function]
   12703  *
   12704  *	Read a PHY register on the SGMII
   12705  * This could be handled by the PHY layer if we didn't have to lock the
   12706  * resource ...
   12707  */
   12708 static int
   12709 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12710 {
   12711 	struct wm_softc *sc = device_private(dev);
   12712 	int rv;
   12713 
   12714 	rv = sc->phy.acquire(sc);
   12715 	if (rv != 0) {
   12716 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12717 		return rv;
   12718 	}
   12719 
   12720 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12721 
   12722 	sc->phy.release(sc);
   12723 	return rv;
   12724 }
   12725 
   12726 static int
   12727 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12728 {
   12729 	struct wm_softc *sc = device_private(dev);
   12730 	uint32_t i2ccmd;
   12731 	int i, rv = 0;
   12732 
   12733 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12734 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12735 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12736 
   12737 	/* Poll the ready bit */
   12738 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12739 		delay(50);
   12740 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12741 		if (i2ccmd & I2CCMD_READY)
   12742 			break;
   12743 	}
   12744 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12745 		device_printf(dev, "I2CCMD Read did not complete\n");
   12746 		rv = ETIMEDOUT;
   12747 	}
   12748 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12749 		if (!sc->phy.no_errprint)
   12750 			device_printf(dev, "I2CCMD Error bit set\n");
   12751 		rv = EIO;
   12752 	}
   12753 
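	/*
	 * The I2C interface returns the data bytes swapped; undo that
	 * here, mirroring the swap in the write path below.
	 */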
   12754 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12755 
   12756 	return rv;
   12757 }
   12758 
   12759 /*
   12760  * wm_sgmii_writereg:	[mii interface function]
   12761  *
   12762  *	Write a PHY register on the SGMII.
   12763  * This could be handled by the PHY layer if we didn't have to lock the
   12764  * resource ...
   12765  */
   12766 static int
   12767 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12768 {
   12769 	struct wm_softc *sc = device_private(dev);
   12770 	int rv;
   12771 
   12772 	rv = sc->phy.acquire(sc);
   12773 	if (rv != 0) {
   12774 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12775 		return rv;
   12776 	}
   12777 
   12778 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12779 
   12780 	sc->phy.release(sc);
   12781 
   12782 	return rv;
   12783 }
   12784 
   12785 static int
   12786 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12787 {
   12788 	struct wm_softc *sc = device_private(dev);
   12789 	uint32_t i2ccmd;
   12790 	uint16_t swapdata;
   12791 	int rv = 0;
   12792 	int i;
   12793 
   12794 	/* Swap the data bytes for the I2C interface */
   12795 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12796 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12797 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12798 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12799 
   12800 	/* Poll the ready bit */
   12801 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12802 		delay(50);
   12803 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12804 		if (i2ccmd & I2CCMD_READY)
   12805 			break;
   12806 	}
   12807 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12808 		device_printf(dev, "I2CCMD Write did not complete\n");
   12809 		rv = ETIMEDOUT;
   12810 	}
   12811 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12812 		device_printf(dev, "I2CCMD Error bit set\n");
   12813 		rv = EIO;
   12814 	}
   12815 
   12816 	return rv;
   12817 }
   12818 
   12819 /* TBI related */
   12820 
   12821 static bool
   12822 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12823 {
   12824 	bool sig;
   12825 
   12826 	sig = ctrl & CTRL_SWDPIN(1);
   12827 
   12828 	/*
   12829 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12830 	 * detect a signal, 1 if they don't.
   12831 	 */
   12832 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12833 		sig = !sig;
   12834 
   12835 	return sig;
   12836 }
   12837 
   12838 /*
   12839  * wm_tbi_mediainit:
   12840  *
   12841  *	Initialize media for use on 1000BASE-X devices.
   12842  */
   12843 static void
   12844 wm_tbi_mediainit(struct wm_softc *sc)
   12845 {
   12846 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12847 	const char *sep = "";
   12848 
   12849 	if (sc->sc_type < WM_T_82543)
   12850 		sc->sc_tipg = TIPG_WM_DFLT;
   12851 	else
   12852 		sc->sc_tipg = TIPG_LG_DFLT;
   12853 
   12854 	sc->sc_tbi_serdes_anegticks = 5;
   12855 
   12856 	/* Initialize our media structures */
   12857 	sc->sc_mii.mii_ifp = ifp;
   12858 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12859 
   12860 	ifp->if_baudrate = IF_Gbps(1);
   12861 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12862 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12863 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12864 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12865 		    sc->sc_core_lock);
   12866 	} else {
   12867 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12868 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12869 	}
   12870 
   12871 	/*
   12872 	 * SWD Pins:
   12873 	 *
   12874 	 *	0 = Link LED (output)
   12875 	 *	1 = Loss Of Signal (input)
   12876 	 */
   12877 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12878 
   12879 	/* XXX Perhaps this is only for TBI */
   12880 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12881 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12882 
   12883 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12884 		sc->sc_ctrl &= ~CTRL_LRST;
   12885 
   12886 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12887 
   12888 #define	ADD(ss, mm, dd)							  \
   12889 do {									  \
   12890 	aprint_normal("%s%s", sep, ss);					  \
   12891 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12892 	sep = ", ";							  \
   12893 } while (/*CONSTCOND*/0)
   12894 
   12895 	aprint_normal_dev(sc->sc_dev, "");
   12896 
   12897 	if (sc->sc_type == WM_T_I354) {
   12898 		uint32_t status;
   12899 
   12900 		status = CSR_READ(sc, WMREG_STATUS);
   12901 		if (((status & STATUS_2P5_SKU) != 0)
   12902 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    12903 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    12904 		} else
    12905 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12906 	} else if (sc->sc_type == WM_T_82545) {
   12907 		/* Only 82545 is LX (XXX except SFP) */
   12908 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12909 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12910 	} else if (sc->sc_sfptype != 0) {
   12911 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12912 		switch (sc->sc_sfptype) {
   12913 		default:
   12914 		case SFF_SFP_ETH_FLAGS_1000SX:
   12915 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12916 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12917 			break;
   12918 		case SFF_SFP_ETH_FLAGS_1000LX:
   12919 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12920 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12921 			break;
   12922 		case SFF_SFP_ETH_FLAGS_1000CX:
   12923 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12924 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12925 			break;
   12926 		case SFF_SFP_ETH_FLAGS_1000T:
   12927 			ADD("1000baseT", IFM_1000_T, 0);
   12928 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12929 			break;
   12930 		case SFF_SFP_ETH_FLAGS_100FX:
   12931 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12932 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12933 			break;
   12934 		}
   12935 	} else {
   12936 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12937 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12938 	}
   12939 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12940 	aprint_normal("\n");
   12941 
   12942 #undef ADD
   12943 
   12944 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12945 }
   12946 
   12947 /*
   12948  * wm_tbi_mediachange:	[ifmedia interface function]
   12949  *
   12950  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12951  */
   12952 static int
   12953 wm_tbi_mediachange(struct ifnet *ifp)
   12954 {
   12955 	struct wm_softc *sc = ifp->if_softc;
   12956 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12957 	uint32_t status, ctrl;
   12958 	bool signal;
   12959 	int i;
   12960 
   12961 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12962 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12963 		/* XXX need some work for >= 82571 and < 82575 */
   12964 		if (sc->sc_type < WM_T_82575)
   12965 			return 0;
   12966 	}
   12967 
   12968 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12969 	    || (sc->sc_type >= WM_T_82575))
   12970 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12971 
   12972 	sc->sc_ctrl &= ~CTRL_LRST;
   12973 	sc->sc_txcw = TXCW_ANE;
   12974 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12975 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12976 	else if (ife->ifm_media & IFM_FDX)
   12977 		sc->sc_txcw |= TXCW_FD;
   12978 	else
   12979 		sc->sc_txcw |= TXCW_HD;
   12980 
   12981 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12982 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12983 
    12984 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12985 		device_xname(sc->sc_dev), sc->sc_txcw));
   12986 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12987 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12988 	CSR_WRITE_FLUSH(sc);
   12989 	delay(1000);
   12990 
   12991 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12992 	signal = wm_tbi_havesignal(sc, ctrl);
   12993 
   12994 	DPRINTF(sc, WM_DEBUG_LINK,
   12995 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12996 
   12997 	if (signal) {
   12998 		/* Have signal; wait for the link to come up. */
   12999 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13000 			delay(10000);
   13001 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13002 				break;
   13003 		}
   13004 
   13005 		DPRINTF(sc, WM_DEBUG_LINK,
   13006 		    ("%s: i = %d after waiting for link\n",
   13007 			device_xname(sc->sc_dev), i));
   13008 
   13009 		status = CSR_READ(sc, WMREG_STATUS);
   13010 		DPRINTF(sc, WM_DEBUG_LINK,
   13011 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13012 			__PRIxBIT "\n",
   13013 			device_xname(sc->sc_dev), status, STATUS_LU));
   13014 		if (status & STATUS_LU) {
   13015 			/* Link is up. */
   13016 			DPRINTF(sc, WM_DEBUG_LINK,
   13017 			    ("%s: LINK: set media -> link up %s\n",
   13018 				device_xname(sc->sc_dev),
   13019 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13020 
   13021 			/*
   13022 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   13023 			 * so we should update sc->sc_ctrl
   13024 			 */
   13025 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13026 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13027 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13028 			if (status & STATUS_FD)
   13029 				sc->sc_tctl |=
   13030 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13031 			else
   13032 				sc->sc_tctl |=
   13033 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13034 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13035 				sc->sc_fcrtl |= FCRTL_XONE;
   13036 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13037 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13038 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13039 			sc->sc_tbi_linkup = 1;
   13040 		} else {
   13041 			if (i == WM_LINKUP_TIMEOUT)
   13042 				wm_check_for_link(sc);
   13043 			/* Link is down. */
   13044 			DPRINTF(sc, WM_DEBUG_LINK,
   13045 			    ("%s: LINK: set media -> link down\n",
   13046 				device_xname(sc->sc_dev)));
   13047 			sc->sc_tbi_linkup = 0;
   13048 		}
   13049 	} else {
   13050 		DPRINTF(sc, WM_DEBUG_LINK,
   13051 		    ("%s: LINK: set media -> no signal\n",
   13052 			device_xname(sc->sc_dev)));
   13053 		sc->sc_tbi_linkup = 0;
   13054 	}
   13055 
   13056 	wm_tbi_serdes_set_linkled(sc);
   13057 
   13058 	return 0;
   13059 }
   13060 
   13061 /*
   13062  * wm_tbi_mediastatus:	[ifmedia interface function]
   13063  *
   13064  *	Get the current interface media status on a 1000BASE-X device.
   13065  */
   13066 static void
   13067 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13068 {
   13069 	struct wm_softc *sc = ifp->if_softc;
   13070 	uint32_t ctrl, status;
   13071 
   13072 	ifmr->ifm_status = IFM_AVALID;
   13073 	ifmr->ifm_active = IFM_ETHER;
   13074 
   13075 	status = CSR_READ(sc, WMREG_STATUS);
   13076 	if ((status & STATUS_LU) == 0) {
   13077 		ifmr->ifm_active |= IFM_NONE;
   13078 		return;
   13079 	}
   13080 
   13081 	ifmr->ifm_status |= IFM_ACTIVE;
   13082 	/* Only 82545 is LX */
   13083 	if (sc->sc_type == WM_T_82545)
   13084 		ifmr->ifm_active |= IFM_1000_LX;
   13085 	else
   13086 		ifmr->ifm_active |= IFM_1000_SX;
   13087 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13088 		ifmr->ifm_active |= IFM_FDX;
   13089 	else
   13090 		ifmr->ifm_active |= IFM_HDX;
   13091 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13092 	if (ctrl & CTRL_RFCE)
   13093 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13094 	if (ctrl & CTRL_TFCE)
   13095 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13096 }
   13097 
   13098 /* XXX TBI only */
   13099 static int
   13100 wm_check_for_link(struct wm_softc *sc)
   13101 {
   13102 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13103 	uint32_t rxcw;
   13104 	uint32_t ctrl;
   13105 	uint32_t status;
   13106 	bool signal;
   13107 
   13108 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13109 		device_xname(sc->sc_dev), __func__));
   13110 
   13111 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13112 		/* XXX need some work for >= 82571 */
   13113 		if (sc->sc_type >= WM_T_82571) {
   13114 			sc->sc_tbi_linkup = 1;
   13115 			return 0;
   13116 		}
   13117 	}
   13118 
   13119 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13120 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13121 	status = CSR_READ(sc, WMREG_STATUS);
   13122 	signal = wm_tbi_havesignal(sc, ctrl);
   13123 
   13124 	DPRINTF(sc, WM_DEBUG_LINK,
   13125 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13126 		device_xname(sc->sc_dev), __func__, signal,
   13127 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13128 
   13129 	/*
   13130 	 * SWDPIN   LU RXCW
   13131 	 *	0    0	  0
   13132 	 *	0    0	  1	(should not happen)
   13133 	 *	0    1	  0	(should not happen)
   13134 	 *	0    1	  1	(should not happen)
   13135 	 *	1    0	  0	Disable autonego and force linkup
   13136 	 *	1    0	  1	got /C/ but not linkup yet
   13137 	 *	1    1	  0	(linkup)
   13138 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13139 	 *
   13140 	 */
   13141 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13142 		DPRINTF(sc, WM_DEBUG_LINK,
   13143 		    ("%s: %s: force linkup and fullduplex\n",
   13144 			device_xname(sc->sc_dev), __func__));
   13145 		sc->sc_tbi_linkup = 0;
   13146 		/* Disable auto-negotiation in the TXCW register */
   13147 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13148 
   13149 		/*
   13150 		 * Force link-up and also force full-duplex.
   13151 		 *
    13152 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
    13153 		 * so we should refresh sc->sc_ctrl from the register.
   13154 		 */
   13155 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13156 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13157 	} else if (((status & STATUS_LU) != 0)
   13158 	    && ((rxcw & RXCW_C) != 0)
   13159 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13160 		sc->sc_tbi_linkup = 1;
   13161 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13162 			device_xname(sc->sc_dev), __func__));
   13163 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13164 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13165 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13166 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13167 			device_xname(sc->sc_dev), __func__));
   13168 	} else {
   13169 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13170 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13171 			status));
   13172 	}
   13173 
   13174 	return 0;
   13175 }
   13176 
   13177 /*
   13178  * wm_tbi_tick:
   13179  *
   13180  *	Check the link on TBI devices.
   13181  *	This function acts as mii_tick().
   13182  */
   13183 static void
   13184 wm_tbi_tick(struct wm_softc *sc)
   13185 {
   13186 	struct mii_data *mii = &sc->sc_mii;
   13187 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13188 	uint32_t status;
   13189 
   13190 	KASSERT(mutex_owned(sc->sc_core_lock));
   13191 
   13192 	status = CSR_READ(sc, WMREG_STATUS);
   13193 
   13194 	/* XXX is this needed? */
   13195 	(void)CSR_READ(sc, WMREG_RXCW);
   13196 	(void)CSR_READ(sc, WMREG_CTRL);
   13197 
   13198 	/* set link status */
   13199 	if ((status & STATUS_LU) == 0) {
   13200 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13201 			device_xname(sc->sc_dev)));
   13202 		sc->sc_tbi_linkup = 0;
   13203 	} else if (sc->sc_tbi_linkup == 0) {
   13204 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13205 			device_xname(sc->sc_dev),
   13206 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13207 		sc->sc_tbi_linkup = 1;
   13208 		sc->sc_tbi_serdes_ticks = 0;
   13209 	}
   13210 
   13211 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13212 		goto setled;
   13213 
   13214 	if ((status & STATUS_LU) == 0) {
   13215 		sc->sc_tbi_linkup = 0;
   13216 		/* If the timer expired, retry autonegotiation */
   13217 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13218 		    && (++sc->sc_tbi_serdes_ticks
   13219 			>= sc->sc_tbi_serdes_anegticks)) {
   13220 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13221 				device_xname(sc->sc_dev), __func__));
   13222 			sc->sc_tbi_serdes_ticks = 0;
   13223 			/*
   13224 			 * Reset the link, and let autonegotiation do
   13225 			 * its thing
   13226 			 */
   13227 			sc->sc_ctrl |= CTRL_LRST;
   13228 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13229 			CSR_WRITE_FLUSH(sc);
   13230 			delay(1000);
   13231 			sc->sc_ctrl &= ~CTRL_LRST;
   13232 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13233 			CSR_WRITE_FLUSH(sc);
   13234 			delay(1000);
   13235 			CSR_WRITE(sc, WMREG_TXCW,
   13236 			    sc->sc_txcw & ~TXCW_ANE);
   13237 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13238 		}
   13239 	}
   13240 
   13241 setled:
   13242 	wm_tbi_serdes_set_linkled(sc);
   13243 }
   13244 
   13245 /* SERDES related */
   13246 static void
   13247 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13248 {
   13249 	uint32_t reg;
   13250 
   13251 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13252 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13253 		return;
   13254 
   13255 	/* Enable PCS to turn on link */
   13256 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13257 	reg |= PCS_CFG_PCS_EN;
   13258 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13259 
   13260 	/* Power up the laser */
   13261 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13262 	reg &= ~CTRL_EXT_SWDPIN(3);
   13263 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13264 
   13265 	/* Flush the write to verify completion */
   13266 	CSR_WRITE_FLUSH(sc);
   13267 	delay(1000);
   13268 }
   13269 
   13270 static int
   13271 wm_serdes_mediachange(struct ifnet *ifp)
   13272 {
   13273 	struct wm_softc *sc = ifp->if_softc;
   13274 	bool pcs_autoneg = true; /* XXX */
   13275 	uint32_t ctrl_ext, pcs_lctl, reg;
   13276 
   13277 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13278 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13279 		return 0;
   13280 
   13281 	/* XXX Currently, this function is not called on 8257[12] */
   13282 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13283 	    || (sc->sc_type >= WM_T_82575))
   13284 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13285 
   13286 	/* Power on the sfp cage if present */
   13287 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13288 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13289 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13290 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13291 
   13292 	sc->sc_ctrl |= CTRL_SLU;
   13293 
   13294 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13295 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13296 
   13297 		reg = CSR_READ(sc, WMREG_CONNSW);
   13298 		reg |= CONNSW_ENRGSRC;
   13299 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13300 	}
   13301 
   13302 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13303 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13304 	case CTRL_EXT_LINK_MODE_SGMII:
   13305 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13306 		pcs_autoneg = true;
   13307 		/* Autoneg time out should be disabled for SGMII mode */
   13308 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13309 		break;
   13310 	case CTRL_EXT_LINK_MODE_1000KX:
   13311 		pcs_autoneg = false;
   13312 		/* FALLTHROUGH */
   13313 	default:
   13314 		if ((sc->sc_type == WM_T_82575)
   13315 		    || (sc->sc_type == WM_T_82576)) {
   13316 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13317 				pcs_autoneg = false;
   13318 		}
   13319 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13320 		    | CTRL_FRCFDX;
   13321 
   13322 		/* Set speed of 1000/Full if speed/duplex is forced */
   13323 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13324 	}
   13325 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13326 
   13327 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13328 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13329 
   13330 	if (pcs_autoneg) {
   13331 		/* Set PCS register for autoneg */
   13332 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13333 
   13334 		/* Disable force flow control for autoneg */
   13335 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13336 
   13337 		/* Configure flow control advertisement for autoneg */
   13338 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13339 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13340 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13341 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13342 	} else
   13343 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13344 
   13345 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13346 
   13347 	return 0;
   13348 }
   13349 
   13350 static void
   13351 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13352 {
   13353 	struct wm_softc *sc = ifp->if_softc;
   13354 	struct mii_data *mii = &sc->sc_mii;
   13355 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13356 	uint32_t pcs_adv, pcs_lpab, reg;
   13357 
   13358 	ifmr->ifm_status = IFM_AVALID;
   13359 	ifmr->ifm_active = IFM_ETHER;
   13360 
   13361 	/* Check PCS */
   13362 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13363 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13364 		ifmr->ifm_active |= IFM_NONE;
   13365 		sc->sc_tbi_linkup = 0;
   13366 		goto setled;
   13367 	}
   13368 
   13369 	sc->sc_tbi_linkup = 1;
   13370 	ifmr->ifm_status |= IFM_ACTIVE;
   13371 	if (sc->sc_type == WM_T_I354) {
   13372 		uint32_t status;
   13373 
   13374 		status = CSR_READ(sc, WMREG_STATUS);
   13375 		if (((status & STATUS_2P5_SKU) != 0)
   13376 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13377 			ifmr->ifm_active |= IFM_2500_KX;
   13378 		} else
   13379 			ifmr->ifm_active |= IFM_1000_KX;
   13380 	} else {
   13381 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13382 		case PCS_LSTS_SPEED_10:
   13383 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13384 			break;
   13385 		case PCS_LSTS_SPEED_100:
   13386 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13387 			break;
   13388 		case PCS_LSTS_SPEED_1000:
   13389 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13390 			break;
   13391 		default:
   13392 			device_printf(sc->sc_dev, "Unknown speed\n");
   13393 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13394 			break;
   13395 		}
   13396 	}
   13397 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13398 	if ((reg & PCS_LSTS_FDX) != 0)
   13399 		ifmr->ifm_active |= IFM_FDX;
   13400 	else
   13401 		ifmr->ifm_active |= IFM_HDX;
   13402 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13403 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13404 		/* Check flow */
   13405 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13406 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13407 			DPRINTF(sc, WM_DEBUG_LINK,
   13408 			    ("XXX LINKOK but not ACOMP\n"));
   13409 			goto setled;
   13410 		}
   13411 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13412 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13413 		DPRINTF(sc, WM_DEBUG_LINK,
   13414 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13415 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13416 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13417 			mii->mii_media_active |= IFM_FLOW
   13418 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13419 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13420 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13421 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13422 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13423 			mii->mii_media_active |= IFM_FLOW
   13424 			    | IFM_ETH_TXPAUSE;
   13425 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13426 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13427 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13428 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13429 			mii->mii_media_active |= IFM_FLOW
   13430 			    | IFM_ETH_RXPAUSE;
   13431 		}
   13432 	}
   13433 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13434 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13435 setled:
   13436 	wm_tbi_serdes_set_linkled(sc);
   13437 }
   13438 
   13439 /*
   13440  * wm_serdes_tick:
   13441  *
   13442  *	Check the link on serdes devices.
   13443  */
   13444 static void
   13445 wm_serdes_tick(struct wm_softc *sc)
   13446 {
   13447 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13448 	struct mii_data *mii = &sc->sc_mii;
   13449 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13450 	uint32_t reg;
   13451 
   13452 	KASSERT(mutex_owned(sc->sc_core_lock));
   13453 
   13454 	mii->mii_media_status = IFM_AVALID;
   13455 	mii->mii_media_active = IFM_ETHER;
   13456 
   13457 	/* Check PCS */
   13458 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13459 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13460 		mii->mii_media_status |= IFM_ACTIVE;
   13461 		sc->sc_tbi_linkup = 1;
   13462 		sc->sc_tbi_serdes_ticks = 0;
   13463 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13464 		if ((reg & PCS_LSTS_FDX) != 0)
   13465 			mii->mii_media_active |= IFM_FDX;
   13466 		else
   13467 			mii->mii_media_active |= IFM_HDX;
   13468 	} else {
    13469 		mii->mii_media_active |= IFM_NONE;
   13470 		sc->sc_tbi_linkup = 0;
   13471 		/* If the timer expired, retry autonegotiation */
   13472 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13473 		    && (++sc->sc_tbi_serdes_ticks
   13474 			>= sc->sc_tbi_serdes_anegticks)) {
   13475 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13476 				device_xname(sc->sc_dev), __func__));
   13477 			sc->sc_tbi_serdes_ticks = 0;
   13478 			/* XXX */
   13479 			wm_serdes_mediachange(ifp);
   13480 		}
   13481 	}
   13482 
   13483 	wm_tbi_serdes_set_linkled(sc);
   13484 }
   13485 
   13486 /* SFP related */
   13487 
   13488 static int
   13489 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13490 {
   13491 	uint32_t i2ccmd;
   13492 	int i;
   13493 
   13494 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13495 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13496 
   13497 	/* Poll the ready bit */
   13498 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13499 		delay(50);
   13500 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13501 		if (i2ccmd & I2CCMD_READY)
   13502 			break;
   13503 	}
   13504 	if ((i2ccmd & I2CCMD_READY) == 0)
   13505 		return -1;
   13506 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13507 		return -1;
   13508 
   13509 	*data = i2ccmd & 0x00ff;
   13510 
   13511 	return 0;
   13512 }
   13513 
   13514 static uint32_t
   13515 wm_sfp_get_media_type(struct wm_softc *sc)
   13516 {
   13517 	uint32_t ctrl_ext;
   13518 	uint8_t val = 0;
   13519 	int timeout = 3;
   13520 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13521 	int rv = -1;
   13522 
   13523 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13524 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13525 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13526 	CSR_WRITE_FLUSH(sc);
   13527 
   13528 	/* Read SFP module data */
   13529 	while (timeout) {
   13530 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13531 		if (rv == 0)
   13532 			break;
   13533 		delay(100*1000); /* XXX too big */
   13534 		timeout--;
   13535 	}
   13536 	if (rv != 0)
   13537 		goto out;
   13538 
   13539 	switch (val) {
   13540 	case SFF_SFP_ID_SFF:
   13541 		aprint_normal_dev(sc->sc_dev,
   13542 		    "Module/Connector soldered to board\n");
   13543 		break;
   13544 	case SFF_SFP_ID_SFP:
   13545 		sc->sc_flags |= WM_F_SFP;
   13546 		break;
   13547 	case SFF_SFP_ID_UNKNOWN:
   13548 		goto out;
   13549 	default:
   13550 		break;
   13551 	}
   13552 
   13553 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13554 	if (rv != 0)
   13555 		goto out;
   13556 
   13557 	sc->sc_sfptype = val;
   13558 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13559 		mediatype = WM_MEDIATYPE_SERDES;
   13560 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13561 		sc->sc_flags |= WM_F_SGMII;
   13562 		mediatype = WM_MEDIATYPE_COPPER;
   13563 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13564 		sc->sc_flags |= WM_F_SGMII;
   13565 		mediatype = WM_MEDIATYPE_SERDES;
   13566 	} else {
   13567 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13568 		    __func__, sc->sc_sfptype);
   13569 		sc->sc_sfptype = 0; /* XXX unknown */
   13570 	}
   13571 
   13572 out:
   13573 	/* Restore I2C interface setting */
   13574 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13575 
   13576 	return mediatype;
   13577 }
   13578 
   13579 /*
   13580  * NVM related.
   13581  * Microwire, SPI (w/wo EERD) and Flash.
   13582  */
   13583 
   13584 /* Both spi and uwire */
   13585 
   13586 /*
   13587  * wm_eeprom_sendbits:
   13588  *
   13589  *	Send a series of bits to the EEPROM.
   13590  */
   13591 static void
   13592 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13593 {
   13594 	uint32_t reg;
   13595 	int x;
   13596 
   13597 	reg = CSR_READ(sc, WMREG_EECD);
   13598 
   13599 	for (x = nbits; x > 0; x--) {
   13600 		if (bits & (1U << (x - 1)))
   13601 			reg |= EECD_DI;
   13602 		else
   13603 			reg &= ~EECD_DI;
   13604 		CSR_WRITE(sc, WMREG_EECD, reg);
   13605 		CSR_WRITE_FLUSH(sc);
   13606 		delay(2);
   13607 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13608 		CSR_WRITE_FLUSH(sc);
   13609 		delay(2);
   13610 		CSR_WRITE(sc, WMREG_EECD, reg);
   13611 		CSR_WRITE_FLUSH(sc);
   13612 		delay(2);
   13613 	}
   13614 }
   13615 
   13616 /*
   13617  * wm_eeprom_recvbits:
   13618  *
   13619  *	Receive a series of bits from the EEPROM.
   13620  */
   13621 static void
   13622 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13623 {
   13624 	uint32_t reg, val;
   13625 	int x;
   13626 
   13627 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13628 
   13629 	val = 0;
   13630 	for (x = nbits; x > 0; x--) {
   13631 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13632 		CSR_WRITE_FLUSH(sc);
   13633 		delay(2);
   13634 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13635 			val |= (1U << (x - 1));
   13636 		CSR_WRITE(sc, WMREG_EECD, reg);
   13637 		CSR_WRITE_FLUSH(sc);
   13638 		delay(2);
   13639 	}
   13640 	*valp = val;
   13641 }
   13642 
   13643 /* Microwire */
   13644 
   13645 /*
   13646  * wm_nvm_read_uwire:
   13647  *
   13648  *	Read a word from the EEPROM using the MicroWire protocol.
   13649  */
   13650 static int
   13651 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13652 {
   13653 	uint32_t reg, val;
   13654 	int i, rv;
   13655 
   13656 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13657 		device_xname(sc->sc_dev), __func__));
   13658 
   13659 	rv = sc->nvm.acquire(sc);
   13660 	if (rv != 0)
   13661 		return rv;
   13662 
   13663 	for (i = 0; i < wordcnt; i++) {
   13664 		/* Clear SK and DI. */
   13665 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13666 		CSR_WRITE(sc, WMREG_EECD, reg);
   13667 
   13668 		/*
   13669 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13670 		 * and Xen.
   13671 		 *
   13672 		 * We use this workaround only for 82540 because qemu's
    13673 		 * e1000 acts as an 82540.
   13674 		 */
   13675 		if (sc->sc_type == WM_T_82540) {
   13676 			reg |= EECD_SK;
   13677 			CSR_WRITE(sc, WMREG_EECD, reg);
   13678 			reg &= ~EECD_SK;
   13679 			CSR_WRITE(sc, WMREG_EECD, reg);
   13680 			CSR_WRITE_FLUSH(sc);
   13681 			delay(2);
   13682 		}
   13683 		/* XXX: end of workaround */
   13684 
   13685 		/* Set CHIP SELECT. */
   13686 		reg |= EECD_CS;
   13687 		CSR_WRITE(sc, WMREG_EECD, reg);
   13688 		CSR_WRITE_FLUSH(sc);
   13689 		delay(2);
   13690 
   13691 		/* Shift in the READ command. */
   13692 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13693 
   13694 		/* Shift in address. */
   13695 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13696 
   13697 		/* Shift out the data. */
   13698 		wm_eeprom_recvbits(sc, &val, 16);
   13699 		data[i] = val & 0xffff;
   13700 
   13701 		/* Clear CHIP SELECT. */
   13702 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13703 		CSR_WRITE(sc, WMREG_EECD, reg);
   13704 		CSR_WRITE_FLUSH(sc);
   13705 		delay(2);
   13706 	}
   13707 
   13708 	sc->nvm.release(sc);
   13709 	return 0;
   13710 }
   13711 
   13712 /* SPI */
   13713 
   13714 /*
   13715  * Set SPI and FLASH related information from the EECD register.
   13716  * For 82541 and 82547, the word size is taken from EEPROM.
   13717  */
   13718 static int
   13719 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13720 {
   13721 	int size;
   13722 	uint32_t reg;
   13723 	uint16_t data;
    13724 	uint16_t data = 0;
   13725 	reg = CSR_READ(sc, WMREG_EECD);
   13726 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13727 
   13728 	/* Read the size of NVM from EECD by default */
   13729 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13730 	switch (sc->sc_type) {
   13731 	case WM_T_82541:
   13732 	case WM_T_82541_2:
   13733 	case WM_T_82547:
   13734 	case WM_T_82547_2:
   13735 		/* Set dummy value to access EEPROM */
   13736 		sc->sc_nvm_wordsize = 64;
   13737 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13738 			aprint_error_dev(sc->sc_dev,
   13739 			    "%s: failed to read EEPROM size\n", __func__);
   13740 		}
   13741 		reg = data;
   13742 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13743 		if (size == 0)
   13744 			size = 6; /* 64 word size */
   13745 		else
   13746 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13747 		break;
   13748 	case WM_T_80003:
   13749 	case WM_T_82571:
   13750 	case WM_T_82572:
   13751 	case WM_T_82573: /* SPI case */
   13752 	case WM_T_82574: /* SPI case */
   13753 	case WM_T_82583: /* SPI case */
   13754 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13755 		if (size > 14)
   13756 			size = 14;
   13757 		break;
   13758 	case WM_T_82575:
   13759 	case WM_T_82576:
   13760 	case WM_T_82580:
   13761 	case WM_T_I350:
   13762 	case WM_T_I354:
   13763 	case WM_T_I210:
   13764 	case WM_T_I211:
   13765 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13766 		if (size > 15)
   13767 			size = 15;
   13768 		break;
   13769 	default:
   13770 		aprint_error_dev(sc->sc_dev,
   13771 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13772 		return -1;
   13773 		break;
   13774 	}
   13775 
   13776 	sc->sc_nvm_wordsize = 1 << size;
   13777 
   13778 	return 0;
   13779 }
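
/*
 * Worked example (illustrative): the EECD/EEPROM size field is only an
 * exponent, so e.g. a raw field of 2 on one of the chips above yields
 * 1 << (2 + NVM_WORD_SIZE_BASE_SHIFT) words; the clamps above bound the
 * exponent, not the word count directly.
 */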
   13780 
   13781 /*
   13782  * wm_nvm_ready_spi:
   13783  *
   13784  *	Wait for a SPI EEPROM to be ready for commands.
   13785  */
   13786 static int
   13787 wm_nvm_ready_spi(struct wm_softc *sc)
   13788 {
   13789 	uint32_t val;
   13790 	int usec;
   13791 
   13792 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13793 		device_xname(sc->sc_dev), __func__));
   13794 
   13795 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13796 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13797 		wm_eeprom_recvbits(sc, &val, 8);
   13798 		if ((val & SPI_SR_RDY) == 0)
   13799 			break;
   13800 	}
   13801 	if (usec >= SPI_MAX_RETRIES) {
   13802 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   13803 		return -1;
   13804 	}
   13805 	return 0;
   13806 }
   13807 
   13808 /*
   13809  * wm_nvm_read_spi:
   13810  *
    13811  *	Read a word from the EEPROM using the SPI protocol.
   13812  */
   13813 static int
   13814 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13815 {
   13816 	uint32_t reg, val;
   13817 	int i;
   13818 	uint8_t opc;
   13819 	int rv;
   13820 
   13821 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13822 		device_xname(sc->sc_dev), __func__));
   13823 
   13824 	rv = sc->nvm.acquire(sc);
   13825 	if (rv != 0)
   13826 		return rv;
   13827 
   13828 	/* Clear SK and CS. */
   13829 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13830 	CSR_WRITE(sc, WMREG_EECD, reg);
   13831 	CSR_WRITE_FLUSH(sc);
   13832 	delay(2);
   13833 
   13834 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13835 		goto out;
   13836 
   13837 	/* Toggle CS to flush commands. */
   13838 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13839 	CSR_WRITE_FLUSH(sc);
   13840 	delay(2);
   13841 	CSR_WRITE(sc, WMREG_EECD, reg);
   13842 	CSR_WRITE_FLUSH(sc);
   13843 	delay(2);
   13844 
   13845 	opc = SPI_OPC_READ;
   13846 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13847 		opc |= SPI_OPC_A8;
   13848 
   13849 	wm_eeprom_sendbits(sc, opc, 8);
   13850 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13851 
   13852 	for (i = 0; i < wordcnt; i++) {
   13853 		wm_eeprom_recvbits(sc, &val, 16);
   13854 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13855 	}
   13856 
   13857 	/* Raise CS and clear SK. */
   13858 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13859 	CSR_WRITE(sc, WMREG_EECD, reg);
   13860 	CSR_WRITE_FLUSH(sc);
   13861 	delay(2);
   13862 
   13863 out:
   13864 	sc->nvm.release(sc);
   13865 	return rv;
   13866 }
   13867 
    13868 /* NVM access using EERD */
   13869 
   13870 static int
   13871 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13872 {
   13873 	uint32_t attempts = 100000;
   13874 	uint32_t i, reg = 0;
   13875 	int32_t done = -1;
   13876 
   13877 	for (i = 0; i < attempts; i++) {
   13878 		reg = CSR_READ(sc, rw);
   13879 
   13880 		if (reg & EERD_DONE) {
   13881 			done = 0;
   13882 			break;
   13883 		}
   13884 		delay(5);
   13885 	}
   13886 
   13887 	return done;
   13888 }
   13889 
   13890 static int
   13891 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13892 {
   13893 	int i, eerd = 0;
   13894 	int rv;
   13895 
   13896 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13897 		device_xname(sc->sc_dev), __func__));
   13898 
   13899 	rv = sc->nvm.acquire(sc);
   13900 	if (rv != 0)
   13901 		return rv;
   13902 
   13903 	for (i = 0; i < wordcnt; i++) {
   13904 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13905 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13906 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13907 		if (rv != 0) {
   13908 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13909 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13910 			break;
   13911 		}
   13912 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13913 	}
   13914 
   13915 	sc->nvm.release(sc);
   13916 	return rv;
   13917 }
   13918 
   13919 /* Flash */
   13920 
   13921 static int
   13922 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13923 {
   13924 	uint32_t eecd;
   13925 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13926 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13927 	uint32_t nvm_dword = 0;
   13928 	uint8_t sig_byte = 0;
   13929 	int rv;
   13930 
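	/*
	 * The valid-signature bits live in the high byte of the NVM
	 * signature word, hence the "* 2 + 1" byte offset above (the
	 * SPT/CNP path below reads a dword and extracts bits 15:8).
	 */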
   13931 	switch (sc->sc_type) {
   13932 	case WM_T_PCH_SPT:
   13933 	case WM_T_PCH_CNP:
   13934 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13935 		act_offset = ICH_NVM_SIG_WORD * 2;
   13936 
   13937 		/* Set bank to 0 in case flash read fails. */
   13938 		*bank = 0;
   13939 
   13940 		/* Check bank 0 */
   13941 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13942 		if (rv != 0)
   13943 			return rv;
   13944 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13945 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13946 			*bank = 0;
   13947 			return 0;
   13948 		}
   13949 
   13950 		/* Check bank 1 */
    13951 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13952 		    &nvm_dword);
		if (rv != 0)
			return rv;
   13953 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13954 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13955 			*bank = 1;
   13956 			return 0;
   13957 		}
   13958 		aprint_error_dev(sc->sc_dev,
   13959 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13960 		return -1;
   13961 	case WM_T_ICH8:
   13962 	case WM_T_ICH9:
   13963 		eecd = CSR_READ(sc, WMREG_EECD);
   13964 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13965 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13966 			return 0;
   13967 		}
   13968 		/* FALLTHROUGH */
   13969 	default:
   13970 		/* Default to 0 */
   13971 		*bank = 0;
   13972 
   13973 		/* Check bank 0 */
   13974 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13975 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13976 			*bank = 0;
   13977 			return 0;
   13978 		}
   13979 
   13980 		/* Check bank 1 */
   13981 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13982 		    &sig_byte);
   13983 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13984 			*bank = 1;
   13985 			return 0;
   13986 		}
   13987 	}
   13988 
   13989 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13990 		device_xname(sc->sc_dev)));
   13991 	return -1;
   13992 }
   13993 
   13994 /******************************************************************************
   13995  * This function does initial flash setup so that a new read/write/erase cycle
   13996  * can be started.
   13997  *
   13998  * sc - The pointer to the hw structure
   13999  ****************************************************************************/
   14000 static int32_t
   14001 wm_ich8_cycle_init(struct wm_softc *sc)
   14002 {
   14003 	uint16_t hsfsts;
   14004 	int32_t error = 1;
   14005 	int32_t i     = 0;
   14006 
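	/*
	 * On PCH_SPT and newer, HSFSTS and HSFCTL live in LAN memory space
	 * as one 32-bit register: HSFSTS in the low 16 bits and HSFCTL in
	 * the high 16 bits.  Older chips expose separate 16-bit registers.
	 */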
   14007 	if (sc->sc_type >= WM_T_PCH_SPT)
   14008 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14009 	else
   14010 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14011 
    14012 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
   14013 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14014 		return error;
   14015 
   14016 	/* Clear FCERR in Hw status by writing 1 */
   14017 	/* Clear DAEL in Hw status by writing a 1 */
   14018 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14019 
   14020 	if (sc->sc_type >= WM_T_PCH_SPT)
   14021 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14022 	else
   14023 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14024 
   14025 	/*
    14026 	 * Either we should have a hardware SPI cycle-in-progress bit to
    14027 	 * check against before starting a new cycle, or the FDONE bit
    14028 	 * should be changed in the hardware so that it reads 1 after a
    14029 	 * hardware reset, which could then indicate whether a cycle is in
    14030 	 * progress or has completed.  We should also have some software
    14031 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    14032 	 * so that accesses to those bits by two threads are serialized,
    14033 	 * or some way to keep two threads from starting a cycle at once.
   14034 	 */
   14035 
   14036 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14037 		/*
   14038 		 * There is no cycle running at present, so we can start a
   14039 		 * cycle
   14040 		 */
   14041 
   14042 		/* Begin by setting Flash Cycle Done. */
   14043 		hsfsts |= HSFSTS_DONE;
   14044 		if (sc->sc_type >= WM_T_PCH_SPT)
   14045 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14046 			    hsfsts & 0xffffUL);
   14047 		else
   14048 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14049 		error = 0;
   14050 	} else {
   14051 		/*
    14052 		 * Otherwise poll for some time so the current cycle has a
   14053 		 * chance to end before giving up.
   14054 		 */
   14055 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14056 			if (sc->sc_type >= WM_T_PCH_SPT)
   14057 				hsfsts = ICH8_FLASH_READ32(sc,
   14058 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14059 			else
   14060 				hsfsts = ICH8_FLASH_READ16(sc,
   14061 				    ICH_FLASH_HSFSTS);
   14062 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14063 				error = 0;
   14064 				break;
   14065 			}
   14066 			delay(1);
   14067 		}
   14068 		if (error == 0) {
   14069 			/*
    14070 			 * Successfully waited for the previous cycle to
    14071 			 * finish; now set the Flash Cycle Done.
   14072 			 */
   14073 			hsfsts |= HSFSTS_DONE;
   14074 			if (sc->sc_type >= WM_T_PCH_SPT)
   14075 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14076 				    hsfsts & 0xffffUL);
   14077 			else
   14078 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14079 				    hsfsts);
   14080 		}
   14081 	}
   14082 	return error;
   14083 }
   14084 
   14085 /******************************************************************************
   14086  * This function starts a flash cycle and waits for its completion
   14087  *
   14088  * sc - The pointer to the hw structure
   14089  ****************************************************************************/
   14090 static int32_t
   14091 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14092 {
   14093 	uint16_t hsflctl;
   14094 	uint16_t hsfsts;
   14095 	int32_t error = 1;
   14096 	uint32_t i = 0;
   14097 
   14098 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14099 	if (sc->sc_type >= WM_T_PCH_SPT)
   14100 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14101 	else
   14102 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14103 	hsflctl |= HSFCTL_GO;
   14104 	if (sc->sc_type >= WM_T_PCH_SPT)
   14105 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14106 		    (uint32_t)hsflctl << 16);
   14107 	else
   14108 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14109 
   14110 	/* Wait till FDONE bit is set to 1 */
   14111 	do {
   14112 		if (sc->sc_type >= WM_T_PCH_SPT)
   14113 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14114 			    & 0xffffUL;
   14115 		else
   14116 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14117 		if (hsfsts & HSFSTS_DONE)
   14118 			break;
   14119 		delay(1);
   14120 		i++;
   14121 	} while (i < timeout);
    14122 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14123 		error = 0;
   14124 
   14125 	return error;
   14126 }
   14127 
   14128 /******************************************************************************
   14129  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14130  *
   14131  * sc - The pointer to the hw structure
   14132  * index - The index of the byte or word to read.
   14133  * size - Size of data to read, 1=byte 2=word, 4=dword
   14134  * data - Pointer to the word to store the value read.
   14135  *****************************************************************************/
   14136 static int32_t
   14137 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14138     uint32_t size, uint32_t *data)
   14139 {
   14140 	uint16_t hsfsts;
   14141 	uint16_t hsflctl;
   14142 	uint32_t flash_linear_address;
   14143 	uint32_t flash_data = 0;
   14144 	int32_t error = 1;
   14145 	int32_t count = 0;
   14146 
    14147 	if (size < 1 || size > 4 || data == NULL ||
   14148 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14149 		return error;
   14150 
   14151 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14152 	    sc->sc_ich8_flash_base;
   14153 
   14154 	do {
   14155 		delay(1);
   14156 		/* Steps */
   14157 		error = wm_ich8_cycle_init(sc);
   14158 		if (error)
   14159 			break;
   14160 
   14161 		if (sc->sc_type >= WM_T_PCH_SPT)
   14162 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14163 			    >> 16;
   14164 		else
   14165 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14166 		/* The byte count field is size - 1 (1, 2 or 4 bytes). */
    14167 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14168 		    & HSFCTL_BCOUNT_MASK;
   14169 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14170 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14171 			/*
    14172 			 * In SPT, this register is in LAN memory space, not
    14173 			 * flash.  Therefore, only 32-bit access is supported.
   14174 			 */
   14175 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14176 			    (uint32_t)hsflctl << 16);
   14177 		} else
   14178 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14179 
   14180 		/*
   14181 		 * Write the last 24 bits of index into Flash Linear address
   14182 		 * field in Flash Address
   14183 		 */
   14184 		/* TODO: TBD maybe check the index against the size of flash */
   14185 
   14186 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14187 
   14188 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14189 
   14190 		/*
    14191 		 * Check if FCERR is set; if it is, clear it and retry the
    14192 		 * whole sequence a few more times.  Otherwise read the data
    14193 		 * in from Flash Data0, least significant byte first.
    14194 		 *
   14195 		 */
   14196 		if (error == 0) {
   14197 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14198 			if (size == 1)
   14199 				*data = (uint8_t)(flash_data & 0x000000FF);
   14200 			else if (size == 2)
   14201 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14202 			else if (size == 4)
   14203 				*data = (uint32_t)flash_data;
   14204 			break;
   14205 		} else {
14206 			/*
14207 			 * If we got here, things are probably completely
14208 			 * hosed, but if the error condition is detected,
14209 			 * it won't hurt to retry the whole sequence up to
14210 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14211 			 */
   14212 			if (sc->sc_type >= WM_T_PCH_SPT)
   14213 				hsfsts = ICH8_FLASH_READ32(sc,
   14214 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14215 			else
   14216 				hsfsts = ICH8_FLASH_READ16(sc,
   14217 				    ICH_FLASH_HSFSTS);
   14218 
   14219 			if (hsfsts & HSFSTS_ERR) {
   14220 				/* Repeat for some time before giving up. */
   14221 				continue;
   14222 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14223 				break;
   14224 		}
   14225 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14226 
   14227 	return error;
   14228 }
   14229 
   14230 /******************************************************************************
   14231  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14232  *
   14233  * sc - pointer to wm_hw structure
   14234  * index - The index of the byte to read.
   14235  * data - Pointer to a byte to store the value read.
   14236  *****************************************************************************/
   14237 static int32_t
   14238 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14239 {
   14240 	int32_t status;
   14241 	uint32_t word = 0;
   14242 
   14243 	status = wm_read_ich8_data(sc, index, 1, &word);
   14244 	if (status == 0)
   14245 		*data = (uint8_t)word;
   14246 	else
   14247 		*data = 0;
   14248 
   14249 	return status;
   14250 }
   14251 
   14252 /******************************************************************************
   14253  * Reads a word from the NVM using the ICH8 flash access registers.
   14254  *
   14255  * sc - pointer to wm_hw structure
   14256  * index - The starting byte index of the word to read.
   14257  * data - Pointer to a word to store the value read.
   14258  *****************************************************************************/
   14259 static int32_t
   14260 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14261 {
   14262 	int32_t status;
   14263 	uint32_t word = 0;
   14264 
   14265 	status = wm_read_ich8_data(sc, index, 2, &word);
   14266 	if (status == 0)
   14267 		*data = (uint16_t)word;
   14268 	else
   14269 		*data = 0;
   14270 
   14271 	return status;
   14272 }
   14273 
   14274 /******************************************************************************
   14275  * Reads a dword from the NVM using the ICH8 flash access registers.
   14276  *
   14277  * sc - pointer to wm_hw structure
14278  * index - The starting byte index of the dword to read.
14279  * data - Pointer to a dword to store the value read.
   14280  *****************************************************************************/
   14281 static int32_t
   14282 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14283 {
   14284 	int32_t status;
   14285 
   14286 	status = wm_read_ich8_data(sc, index, 4, data);
   14287 	return status;
   14288 }
   14289 
   14290 /******************************************************************************
   14291  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14292  * register.
   14293  *
   14294  * sc - Struct containing variables accessed by shared code
14295  * offset - offset of word in the EEPROM to read
14296  * words - number of words to read
14297  * data - word(s) read from the EEPROM
   14298  *****************************************************************************/
   14299 static int
   14300 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14301 {
   14302 	int rv;
   14303 	uint32_t flash_bank = 0;
   14304 	uint32_t act_offset = 0;
   14305 	uint32_t bank_offset = 0;
   14306 	uint16_t word = 0;
   14307 	uint16_t i = 0;
   14308 
   14309 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14310 		device_xname(sc->sc_dev), __func__));
   14311 
   14312 	rv = sc->nvm.acquire(sc);
   14313 	if (rv != 0)
   14314 		return rv;
   14315 
   14316 	/*
   14317 	 * We need to know which is the valid flash bank.  In the event
   14318 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14319 	 * managing flash_bank. So it cannot be trusted and needs
   14320 	 * to be updated with each read.
   14321 	 */
   14322 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14323 	if (rv) {
   14324 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14325 			device_xname(sc->sc_dev)));
   14326 		flash_bank = 0;
   14327 	}
   14328 
14329 	/*
14330 	 * Adjust the offset if we're on bank 1, accounting for the
14331 	 * 16-bit word size.
14332 	 */
   14333 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14334 
   14335 	for (i = 0; i < words; i++) {
   14336 		/* The NVM part needs a byte offset, hence * 2 */
   14337 		act_offset = bank_offset + ((offset + i) * 2);
   14338 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14339 		if (rv) {
   14340 			aprint_error_dev(sc->sc_dev,
   14341 			    "%s: failed to read NVM\n", __func__);
   14342 			break;
   14343 		}
   14344 		data[i] = word;
   14345 	}
   14346 
   14347 	sc->nvm.release(sc);
   14348 	return rv;
   14349 }
   14350 
   14351 /******************************************************************************
   14352  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14353  * register.
   14354  *
   14355  * sc - Struct containing variables accessed by shared code
14356  * offset - offset of word in the EEPROM to read
14357  * words - number of words to read
14358  * data - word(s) read from the EEPROM
   14359  *****************************************************************************/
   14360 static int
   14361 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14362 {
   14363 	int	 rv;
   14364 	uint32_t flash_bank = 0;
   14365 	uint32_t act_offset = 0;
   14366 	uint32_t bank_offset = 0;
   14367 	uint32_t dword = 0;
   14368 	uint16_t i = 0;
   14369 
   14370 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14371 		device_xname(sc->sc_dev), __func__));
   14372 
   14373 	rv = sc->nvm.acquire(sc);
   14374 	if (rv != 0)
   14375 		return rv;
   14376 
   14377 	/*
   14378 	 * We need to know which is the valid flash bank.  In the event
   14379 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14380 	 * managing flash_bank. So it cannot be trusted and needs
   14381 	 * to be updated with each read.
   14382 	 */
   14383 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14384 	if (rv) {
   14385 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14386 			device_xname(sc->sc_dev)));
   14387 		flash_bank = 0;
   14388 	}
   14389 
14390 	/*
14391 	 * Adjust the offset if we're on bank 1, accounting for the
14392 	 * 16-bit word size.
14393 	 */
   14394 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14395 
   14396 	for (i = 0; i < words; i++) {
   14397 		/* The NVM part needs a byte offset, hence * 2 */
   14398 		act_offset = bank_offset + ((offset + i) * 2);
   14399 		/* but we must read dword aligned, so mask ... */
   14400 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14401 		if (rv) {
   14402 			aprint_error_dev(sc->sc_dev,
   14403 			    "%s: failed to read NVM\n", __func__);
   14404 			break;
   14405 		}
   14406 		/* ... and pick out low or high word */
   14407 		if ((act_offset & 0x2) == 0)
   14408 			data[i] = (uint16_t)(dword & 0xFFFF);
   14409 		else
   14410 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14411 	}
   14412 
   14413 	sc->nvm.release(sc);
   14414 	return rv;
   14415 }
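
/*
 * Editor's note, as a worked example of the alignment above: for word
 * offset 0x1b on bank 0, act_offset is 0x36; the dword at byte offset
 * 0x34 is read and, since bit 1 of act_offset is set, the high 16-bit
 * word is picked out.
 */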
   14416 
   14417 /* iNVM */
   14418 
   14419 static int
   14420 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14421 {
14422 	int32_t	 rv = -1;	/* Not found yet */
   14423 	uint32_t invm_dword;
   14424 	uint16_t i;
   14425 	uint8_t record_type, word_address;
   14426 
   14427 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14428 		device_xname(sc->sc_dev), __func__));
   14429 
   14430 	for (i = 0; i < INVM_SIZE; i++) {
   14431 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14432 		/* Get record type */
   14433 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14434 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14435 			break;
   14436 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14437 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14438 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14439 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14440 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14441 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14442 			if (word_address == address) {
   14443 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14444 				rv = 0;
   14445 				break;
   14446 			}
   14447 		}
   14448 	}
   14449 
   14450 	return rv;
   14451 }
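
#if 0
/*
 * Usage sketch (editor's illustration; hypothetical helper, not part
 * of the driver): fetch the first MAC address word directly from iNVM.
 */
static void
wm_invm_example(struct wm_softc *sc)
{
	uint16_t mac0;

	if (wm_nvm_read_word_invm(sc, NVM_OFF_MACADDR, &mac0) == 0)
		printf("%s: MAC word 0 = 0x%04x\n",
		    device_xname(sc->sc_dev), mac0);
}
#endif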
   14452 
   14453 static int
   14454 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14455 {
   14456 	int i, rv;
   14457 
   14458 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14459 		device_xname(sc->sc_dev), __func__));
   14460 
   14461 	rv = sc->nvm.acquire(sc);
   14462 	if (rv != 0)
   14463 		return rv;
   14464 
   14465 	for (i = 0; i < words; i++) {
   14466 		switch (offset + i) {
   14467 		case NVM_OFF_MACADDR:
   14468 		case NVM_OFF_MACADDR1:
   14469 		case NVM_OFF_MACADDR2:
   14470 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14471 			if (rv != 0) {
   14472 				data[i] = 0xffff;
   14473 				rv = -1;
   14474 			}
   14475 			break;
   14476 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14477 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14478 			if (rv != 0) {
   14479 				*data = INVM_DEFAULT_AL;
   14480 				rv = 0;
   14481 			}
   14482 			break;
   14483 		case NVM_OFF_CFG2:
   14484 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14485 			if (rv != 0) {
   14486 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14487 				rv = 0;
   14488 			}
   14489 			break;
   14490 		case NVM_OFF_CFG4:
   14491 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14492 			if (rv != 0) {
   14493 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14494 				rv = 0;
   14495 			}
   14496 			break;
   14497 		case NVM_OFF_LED_1_CFG:
   14498 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14499 			if (rv != 0) {
   14500 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14501 				rv = 0;
   14502 			}
   14503 			break;
   14504 		case NVM_OFF_LED_0_2_CFG:
   14505 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14506 			if (rv != 0) {
   14507 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14508 				rv = 0;
   14509 			}
   14510 			break;
   14511 		case NVM_OFF_ID_LED_SETTINGS:
   14512 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14513 			if (rv != 0) {
   14514 				*data = ID_LED_RESERVED_FFFF;
   14515 				rv = 0;
   14516 			}
   14517 			break;
   14518 		default:
   14519 			DPRINTF(sc, WM_DEBUG_NVM,
   14520 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14521 			*data = NVM_RESERVED_WORD;
   14522 			break;
   14523 		}
   14524 	}
   14525 
   14526 	sc->nvm.release(sc);
   14527 	return rv;
   14528 }
   14529 
   14530 /* Lock, detecting NVM type, validate checksum, version and read */
   14531 
   14532 static int
   14533 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14534 {
   14535 	uint32_t eecd = 0;
   14536 
   14537 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14538 	    || sc->sc_type == WM_T_82583) {
   14539 		eecd = CSR_READ(sc, WMREG_EECD);
   14540 
   14541 		/* Isolate bits 15 & 16 */
   14542 		eecd = ((eecd >> 15) & 0x03);
   14543 
   14544 		/* If both bits are set, device is Flash type */
   14545 		if (eecd == 0x03)
   14546 			return 0;
   14547 	}
   14548 	return 1;
   14549 }
   14550 
   14551 static int
   14552 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14553 {
   14554 	uint32_t eec;
   14555 
   14556 	eec = CSR_READ(sc, WMREG_EEC);
   14557 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14558 		return 1;
   14559 
   14560 	return 0;
   14561 }
   14562 
   14563 /*
   14564  * wm_nvm_validate_checksum
   14565  *
   14566  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14567  */
   14568 static int
   14569 wm_nvm_validate_checksum(struct wm_softc *sc)
   14570 {
   14571 	uint16_t checksum;
   14572 	uint16_t eeprom_data;
   14573 #ifdef WM_DEBUG
   14574 	uint16_t csum_wordaddr, valid_checksum;
   14575 #endif
   14576 	int i;
   14577 
   14578 	checksum = 0;
   14579 
   14580 	/* Don't check for I211 */
   14581 	if (sc->sc_type == WM_T_I211)
   14582 		return 0;
   14583 
   14584 #ifdef WM_DEBUG
   14585 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14586 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14587 		csum_wordaddr = NVM_OFF_COMPAT;
   14588 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14589 	} else {
   14590 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14591 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14592 	}
   14593 
   14594 	/* Dump EEPROM image for debug */
   14595 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14596 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14597 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14598 		/* XXX PCH_SPT? */
   14599 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14600 		if ((eeprom_data & valid_checksum) == 0)
   14601 			DPRINTF(sc, WM_DEBUG_NVM,
   14602 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14603 				device_xname(sc->sc_dev), eeprom_data,
   14604 				valid_checksum));
   14605 	}
   14606 
   14607 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14608 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14609 		for (i = 0; i < NVM_SIZE; i++) {
   14610 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14611 				printf("XXXX ");
   14612 			else
   14613 				printf("%04hx ", eeprom_data);
   14614 			if (i % 8 == 7)
   14615 				printf("\n");
   14616 		}
   14617 	}
   14618 
   14619 #endif /* WM_DEBUG */
   14620 
   14621 	for (i = 0; i < NVM_SIZE; i++) {
   14622 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14623 			return -1;
   14624 		checksum += eeprom_data;
   14625 	}
   14626 
   14627 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14628 #ifdef WM_DEBUG
   14629 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14630 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14631 #endif
   14632 	}
   14633 
   14634 	return 0;
   14635 }
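
#if 0
/*
 * Checksum sketch (editor's illustration; hypothetical helper, not
 * part of the driver): the sum of the first NVM_SIZE 16-bit words,
 * including the checksum word itself, must equal NVM_CHECKSUM in
 * 16-bit arithmetic for the image to be valid.
 */
static uint16_t
wm_nvm_sum_example(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE; i++)
		sum += image[i];

	return sum;	/* A valid image yields NVM_CHECKSUM. */
}
#endif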
   14636 
   14637 static void
   14638 wm_nvm_version_invm(struct wm_softc *sc)
   14639 {
   14640 	uint32_t dword;
   14641 
   14642 	/*
14643 	 * Linux's code to decode the version is very strange, so we don't
14644 	 * follow that algorithm and just use word 61 as the documentation
14645 	 * describes.  Perhaps it's not perfect, though...
   14646 	 *
   14647 	 * Example:
   14648 	 *
   14649 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14650 	 */
   14651 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14652 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14653 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14654 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14655 }
   14656 
   14657 static void
   14658 wm_nvm_version(struct wm_softc *sc)
   14659 {
   14660 	uint16_t major, minor, build, patch;
   14661 	uint16_t uid0, uid1;
   14662 	uint16_t nvm_data;
   14663 	uint16_t off;
   14664 	bool check_version = false;
   14665 	bool check_optionrom = false;
   14666 	bool have_build = false;
   14667 	bool have_uid = true;
   14668 
   14669 	/*
   14670 	 * Version format:
   14671 	 *
   14672 	 * XYYZ
   14673 	 * X0YZ
   14674 	 * X0YY
   14675 	 *
   14676 	 * Example:
   14677 	 *
   14678 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14679 	 *	82571	0x50a6	5.10.6?
   14680 	 *	82572	0x506a	5.6.10?
   14681 	 *	82572EI	0x5069	5.6.9?
   14682 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14683 	 *		0x2013	2.1.3?
   14684 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14685 	 * ICH8+82567	0x0040	0.4.0?
   14686 	 * ICH9+82566	0x1040	1.4.0?
   14687 	 *ICH10+82567	0x0043	0.4.3?
   14688 	 *  PCH+82577	0x00c1	0.12.1?
   14689 	 * PCH2+82579	0x00d3	0.13.3?
   14690 	 *		0x00d4	0.13.4?
   14691 	 *  LPT+I218	0x0023	0.2.3?
   14692 	 *  SPT+I219	0x0084	0.8.4?
   14693 	 *  CNP+I219	0x0054	0.5.4?
   14694 	 */
   14695 
   14696 	/*
   14697 	 * XXX
   14698 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14699 	 * I've never seen real 82574 hardware with such small SPI ROM.
   14700 	 */
   14701 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14702 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14703 		have_uid = false;
   14704 
   14705 	switch (sc->sc_type) {
   14706 	case WM_T_82571:
   14707 	case WM_T_82572:
   14708 	case WM_T_82574:
   14709 	case WM_T_82583:
   14710 		check_version = true;
   14711 		check_optionrom = true;
   14712 		have_build = true;
   14713 		break;
   14714 	case WM_T_ICH8:
   14715 	case WM_T_ICH9:
   14716 	case WM_T_ICH10:
   14717 	case WM_T_PCH:
   14718 	case WM_T_PCH2:
   14719 	case WM_T_PCH_LPT:
   14720 	case WM_T_PCH_SPT:
   14721 	case WM_T_PCH_CNP:
   14722 		check_version = true;
   14723 		have_build = true;
   14724 		have_uid = false;
   14725 		break;
   14726 	case WM_T_82575:
   14727 	case WM_T_82576:
   14728 	case WM_T_82580:
   14729 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14730 			check_version = true;
   14731 		break;
   14732 	case WM_T_I211:
   14733 		wm_nvm_version_invm(sc);
   14734 		have_uid = false;
   14735 		goto printver;
   14736 	case WM_T_I210:
   14737 		if (!wm_nvm_flash_presence_i210(sc)) {
   14738 			wm_nvm_version_invm(sc);
   14739 			have_uid = false;
   14740 			goto printver;
   14741 		}
   14742 		/* FALLTHROUGH */
   14743 	case WM_T_I350:
   14744 	case WM_T_I354:
   14745 		check_version = true;
   14746 		check_optionrom = true;
   14747 		break;
   14748 	default:
   14749 		return;
   14750 	}
   14751 	if (check_version
   14752 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14753 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14754 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14755 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14756 			build = nvm_data & NVM_BUILD_MASK;
   14757 			have_build = true;
   14758 		} else
   14759 			minor = nvm_data & 0x00ff;
   14760 
   14761 		/* Decimal */
   14762 		minor = (minor / 16) * 10 + (minor % 16);
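		/*
		 * For example, 82571's 0x50a2 decodes to major 5,
		 * minor 0x0a -> 10 decimal and build 2, printed as
		 * "5.10.2" (cf. the table above).
		 */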
   14763 		sc->sc_nvm_ver_major = major;
   14764 		sc->sc_nvm_ver_minor = minor;
   14765 
   14766 printver:
   14767 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14768 		    sc->sc_nvm_ver_minor);
   14769 		if (have_build) {
   14770 			sc->sc_nvm_ver_build = build;
   14771 			aprint_verbose(".%d", build);
   14772 		}
   14773 	}
   14774 
14775 	/* Assume the Option ROM area is above NVM_SIZE */
   14776 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14777 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14778 		/* Option ROM Version */
   14779 		if ((off != 0x0000) && (off != 0xffff)) {
   14780 			int rv;
   14781 
   14782 			off += NVM_COMBO_VER_OFF;
   14783 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14784 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14785 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14786 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14787 				/* 16bits */
   14788 				major = uid0 >> 8;
   14789 				build = (uid0 << 8) | (uid1 >> 8);
   14790 				patch = uid1 & 0x00ff;
   14791 				aprint_verbose(", option ROM Version %d.%d.%d",
   14792 				    major, build, patch);
   14793 			}
   14794 		}
   14795 	}
   14796 
   14797 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14798 		aprint_verbose(", Image Unique ID %08x",
   14799 		    ((uint32_t)uid1 << 16) | uid0);
   14800 }
   14801 
   14802 /*
   14803  * wm_nvm_read:
   14804  *
   14805  *	Read data from the serial EEPROM.
   14806  */
   14807 static int
   14808 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14809 {
   14810 	int rv;
   14811 
   14812 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14813 		device_xname(sc->sc_dev), __func__));
   14814 
   14815 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14816 		return -1;
   14817 
   14818 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14819 
   14820 	return rv;
   14821 }
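
#if 0
/*
 * Usage sketch (editor's illustration; hypothetical helper, not part
 * of the driver): read one configuration word through the per-chip
 * NVM method table.
 */
static void
wm_nvm_read_example(struct wm_softc *sc)
{
	uint16_t cfg1;

	if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1) != 0)
		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
}
#endif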
   14822 
   14823 /*
   14824  * Hardware semaphores.
14825  * Very complex...
   14826  */
   14827 
   14828 static int
   14829 wm_get_null(struct wm_softc *sc)
   14830 {
   14831 
   14832 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14833 		device_xname(sc->sc_dev), __func__));
   14834 	return 0;
   14835 }
   14836 
   14837 static void
   14838 wm_put_null(struct wm_softc *sc)
   14839 {
   14840 
   14841 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14842 		device_xname(sc->sc_dev), __func__));
   14843 	return;
   14844 }
   14845 
   14846 static int
   14847 wm_get_eecd(struct wm_softc *sc)
   14848 {
   14849 	uint32_t reg;
   14850 	int x;
   14851 
   14852 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14853 		device_xname(sc->sc_dev), __func__));
   14854 
   14855 	reg = CSR_READ(sc, WMREG_EECD);
   14856 
   14857 	/* Request EEPROM access. */
   14858 	reg |= EECD_EE_REQ;
   14859 	CSR_WRITE(sc, WMREG_EECD, reg);
   14860 
   14861 	/* ..and wait for it to be granted. */
   14862 	for (x = 0; x < 1000; x++) {
   14863 		reg = CSR_READ(sc, WMREG_EECD);
   14864 		if (reg & EECD_EE_GNT)
   14865 			break;
   14866 		delay(5);
   14867 	}
   14868 	if ((reg & EECD_EE_GNT) == 0) {
   14869 		aprint_error_dev(sc->sc_dev,
   14870 		    "could not acquire EEPROM GNT\n");
   14871 		reg &= ~EECD_EE_REQ;
   14872 		CSR_WRITE(sc, WMREG_EECD, reg);
   14873 		return -1;
   14874 	}
   14875 
   14876 	return 0;
   14877 }
   14878 
   14879 static void
   14880 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14881 {
   14882 
   14883 	*eecd |= EECD_SK;
   14884 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14885 	CSR_WRITE_FLUSH(sc);
   14886 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14887 		delay(1);
   14888 	else
   14889 		delay(50);
   14890 }
   14891 
   14892 static void
   14893 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14894 {
   14895 
   14896 	*eecd &= ~EECD_SK;
   14897 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14898 	CSR_WRITE_FLUSH(sc);
   14899 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14900 		delay(1);
   14901 	else
   14902 		delay(50);
   14903 }
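
#if 0
/*
 * Bit-banging sketch (editor's illustration; hypothetical helper, not
 * part of the driver): shift one data bit out on EEDI using the clock
 * helpers above.  A real sender loops over all bits of an opcode or
 * datum, most significant bit first.
 */
static void
wm_nvm_send_bit_example(struct wm_softc *sc, int bit)
{
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);

	if (bit)
		eecd |= EECD_DI;
	else
		eecd &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, eecd);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, &eecd);
	wm_nvm_eec_clock_lower(sc, &eecd);
}
#endif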
   14904 
   14905 static void
   14906 wm_put_eecd(struct wm_softc *sc)
   14907 {
   14908 	uint32_t reg;
   14909 
   14910 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14911 		device_xname(sc->sc_dev), __func__));
   14912 
   14913 	/* Stop nvm */
   14914 	reg = CSR_READ(sc, WMREG_EECD);
   14915 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14916 		/* Pull CS high */
   14917 		reg |= EECD_CS;
   14918 		wm_nvm_eec_clock_lower(sc, &reg);
   14919 	} else {
   14920 		/* CS on Microwire is active-high */
   14921 		reg &= ~(EECD_CS | EECD_DI);
   14922 		CSR_WRITE(sc, WMREG_EECD, reg);
   14923 		wm_nvm_eec_clock_raise(sc, &reg);
   14924 		wm_nvm_eec_clock_lower(sc, &reg);
   14925 	}
   14926 
   14927 	reg = CSR_READ(sc, WMREG_EECD);
   14928 	reg &= ~EECD_EE_REQ;
   14929 	CSR_WRITE(sc, WMREG_EECD, reg);
   14930 
   14931 	return;
   14932 }
   14933 
   14934 /*
   14935  * Get hardware semaphore.
   14936  * Same as e1000_get_hw_semaphore_generic()
   14937  */
   14938 static int
   14939 wm_get_swsm_semaphore(struct wm_softc *sc)
   14940 {
   14941 	int32_t timeout;
   14942 	uint32_t swsm;
   14943 
   14944 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14945 		device_xname(sc->sc_dev), __func__));
   14946 	KASSERT(sc->sc_nvm_wordsize > 0);
   14947 
   14948 retry:
   14949 	/* Get the SW semaphore. */
   14950 	timeout = sc->sc_nvm_wordsize + 1;
   14951 	while (timeout) {
   14952 		swsm = CSR_READ(sc, WMREG_SWSM);
   14953 
   14954 		if ((swsm & SWSM_SMBI) == 0)
   14955 			break;
   14956 
   14957 		delay(50);
   14958 		timeout--;
   14959 	}
   14960 
   14961 	if (timeout == 0) {
   14962 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14963 			/*
   14964 			 * In rare circumstances, the SW semaphore may already
   14965 			 * be held unintentionally. Clear the semaphore once
   14966 			 * before giving up.
   14967 			 */
   14968 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14969 			wm_put_swsm_semaphore(sc);
   14970 			goto retry;
   14971 		}
   14972 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14973 		return -1;
   14974 	}
   14975 
   14976 	/* Get the FW semaphore. */
   14977 	timeout = sc->sc_nvm_wordsize + 1;
   14978 	while (timeout) {
   14979 		swsm = CSR_READ(sc, WMREG_SWSM);
   14980 		swsm |= SWSM_SWESMBI;
   14981 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14982 		/* If we managed to set the bit we got the semaphore. */
   14983 		swsm = CSR_READ(sc, WMREG_SWSM);
   14984 		if (swsm & SWSM_SWESMBI)
   14985 			break;
   14986 
   14987 		delay(50);
   14988 		timeout--;
   14989 	}
   14990 
   14991 	if (timeout == 0) {
   14992 		aprint_error_dev(sc->sc_dev,
   14993 		    "could not acquire SWSM SWESMBI\n");
   14994 		/* Release semaphores */
   14995 		wm_put_swsm_semaphore(sc);
   14996 		return -1;
   14997 	}
   14998 	return 0;
   14999 }
   15000 
   15001 /*
   15002  * Put hardware semaphore.
   15003  * Same as e1000_put_hw_semaphore_generic()
   15004  */
   15005 static void
   15006 wm_put_swsm_semaphore(struct wm_softc *sc)
   15007 {
   15008 	uint32_t swsm;
   15009 
   15010 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15011 		device_xname(sc->sc_dev), __func__));
   15012 
   15013 	swsm = CSR_READ(sc, WMREG_SWSM);
   15014 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15015 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15016 }
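
#if 0
/*
 * Locking sketch (editor's illustration; hypothetical helper, not part
 * of the driver): every successful wm_get_swsm_semaphore() must be
 * balanced by exactly one wm_put_swsm_semaphore().
 */
static void
wm_swsm_example(struct wm_softc *sc)
{

	if (wm_get_swsm_semaphore(sc) != 0)
		return;
	/* ... access resources shared with firmware ... */
	wm_put_swsm_semaphore(sc);
}
#endif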
   15017 
   15018 /*
   15019  * Get SW/FW semaphore.
   15020  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15021  */
   15022 static int
   15023 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15024 {
   15025 	uint32_t swfw_sync;
   15026 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15027 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15028 	int timeout;
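	/*
	 * Editor's note (assumed layout): SW_FW_SYNC keeps each resource's
	 * software flag in the low half of the register and the matching
	 * firmware-owned flag in the high half, hence the two shifts above.
	 */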
   15029 
   15030 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15031 		device_xname(sc->sc_dev), __func__));
   15032 
   15033 	if (sc->sc_type == WM_T_80003)
   15034 		timeout = 50;
   15035 	else
   15036 		timeout = 200;
   15037 
   15038 	while (timeout) {
   15039 		if (wm_get_swsm_semaphore(sc)) {
   15040 			aprint_error_dev(sc->sc_dev,
   15041 			    "%s: failed to get semaphore\n",
   15042 			    __func__);
   15043 			return -1;
   15044 		}
   15045 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15046 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15047 			swfw_sync |= swmask;
   15048 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15049 			wm_put_swsm_semaphore(sc);
   15050 			return 0;
   15051 		}
   15052 		wm_put_swsm_semaphore(sc);
   15053 		delay(5000);
   15054 		timeout--;
   15055 	}
   15056 	device_printf(sc->sc_dev,
   15057 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15058 	    mask, swfw_sync);
   15059 	return -1;
   15060 }
   15061 
   15062 static void
   15063 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15064 {
   15065 	uint32_t swfw_sync;
   15066 
   15067 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15068 		device_xname(sc->sc_dev), __func__));
   15069 
   15070 	while (wm_get_swsm_semaphore(sc) != 0)
   15071 		continue;
   15072 
   15073 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15074 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15075 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15076 
   15077 	wm_put_swsm_semaphore(sc);
   15078 }
   15079 
   15080 static int
   15081 wm_get_nvm_80003(struct wm_softc *sc)
   15082 {
   15083 	int rv;
   15084 
   15085 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15086 		device_xname(sc->sc_dev), __func__));
   15087 
   15088 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15089 		aprint_error_dev(sc->sc_dev,
   15090 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15091 		return rv;
   15092 	}
   15093 
   15094 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15095 	    && (rv = wm_get_eecd(sc)) != 0) {
   15096 		aprint_error_dev(sc->sc_dev,
   15097 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15098 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15099 		return rv;
   15100 	}
   15101 
   15102 	return 0;
   15103 }
   15104 
   15105 static void
   15106 wm_put_nvm_80003(struct wm_softc *sc)
   15107 {
   15108 
   15109 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15110 		device_xname(sc->sc_dev), __func__));
   15111 
   15112 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15113 		wm_put_eecd(sc);
   15114 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15115 }
   15116 
   15117 static int
   15118 wm_get_nvm_82571(struct wm_softc *sc)
   15119 {
   15120 	int rv;
   15121 
   15122 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15123 		device_xname(sc->sc_dev), __func__));
   15124 
   15125 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15126 		return rv;
   15127 
   15128 	switch (sc->sc_type) {
   15129 	case WM_T_82573:
   15130 		break;
   15131 	default:
   15132 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15133 			rv = wm_get_eecd(sc);
   15134 		break;
   15135 	}
   15136 
   15137 	if (rv != 0) {
   15138 		aprint_error_dev(sc->sc_dev,
   15139 		    "%s: failed to get semaphore\n",
   15140 		    __func__);
   15141 		wm_put_swsm_semaphore(sc);
   15142 	}
   15143 
   15144 	return rv;
   15145 }
   15146 
   15147 static void
   15148 wm_put_nvm_82571(struct wm_softc *sc)
   15149 {
   15150 
   15151 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15152 		device_xname(sc->sc_dev), __func__));
   15153 
   15154 	switch (sc->sc_type) {
   15155 	case WM_T_82573:
   15156 		break;
   15157 	default:
   15158 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15159 			wm_put_eecd(sc);
   15160 		break;
   15161 	}
   15162 
   15163 	wm_put_swsm_semaphore(sc);
   15164 }
   15165 
   15166 static int
   15167 wm_get_phy_82575(struct wm_softc *sc)
   15168 {
   15169 
   15170 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15171 		device_xname(sc->sc_dev), __func__));
   15172 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15173 }
   15174 
   15175 static void
   15176 wm_put_phy_82575(struct wm_softc *sc)
   15177 {
   15178 
   15179 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15180 		device_xname(sc->sc_dev), __func__));
   15181 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15182 }
   15183 
   15184 static int
   15185 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15186 {
   15187 	uint32_t ext_ctrl;
15188 	int timeout;
   15189 
   15190 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15191 		device_xname(sc->sc_dev), __func__));
   15192 
   15193 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15194 	for (timeout = 0; timeout < 200; timeout++) {
   15195 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15196 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15197 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15198 
   15199 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15200 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15201 			return 0;
   15202 		delay(5000);
   15203 	}
   15204 	device_printf(sc->sc_dev,
   15205 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15206 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15207 	return -1;
   15208 }
   15209 
   15210 static void
   15211 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15212 {
   15213 	uint32_t ext_ctrl;
   15214 
   15215 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15216 		device_xname(sc->sc_dev), __func__));
   15217 
   15218 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15219 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15220 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15221 
   15222 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15223 }
   15224 
   15225 static int
   15226 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15227 {
   15228 	uint32_t ext_ctrl;
   15229 	int timeout;
   15230 
   15231 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15232 		device_xname(sc->sc_dev), __func__));
   15233 	mutex_enter(sc->sc_ich_phymtx);
   15234 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15235 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15236 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15237 			break;
   15238 		delay(1000);
   15239 	}
   15240 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15241 		device_printf(sc->sc_dev,
   15242 		    "SW has already locked the resource\n");
   15243 		goto out;
   15244 	}
   15245 
   15246 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15247 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15248 	for (timeout = 0; timeout < 1000; timeout++) {
   15249 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15250 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15251 			break;
   15252 		delay(1000);
   15253 	}
   15254 	if (timeout >= 1000) {
   15255 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15256 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15257 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15258 		goto out;
   15259 	}
   15260 	return 0;
   15261 
   15262 out:
   15263 	mutex_exit(sc->sc_ich_phymtx);
   15264 	return -1;
   15265 }
   15266 
   15267 static void
   15268 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15269 {
   15270 	uint32_t ext_ctrl;
   15271 
   15272 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15273 		device_xname(sc->sc_dev), __func__));
   15274 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15275 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15276 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15277 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15278 	} else
   15279 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15280 
   15281 	mutex_exit(sc->sc_ich_phymtx);
   15282 }
   15283 
   15284 static int
   15285 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15286 {
   15287 
   15288 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15289 		device_xname(sc->sc_dev), __func__));
   15290 	mutex_enter(sc->sc_ich_nvmmtx);
   15291 
   15292 	return 0;
   15293 }
   15294 
   15295 static void
   15296 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15297 {
   15298 
   15299 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15300 		device_xname(sc->sc_dev), __func__));
   15301 	mutex_exit(sc->sc_ich_nvmmtx);
   15302 }
   15303 
   15304 static int
   15305 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15306 {
   15307 	int i = 0;
   15308 	uint32_t reg;
   15309 
   15310 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15311 		device_xname(sc->sc_dev), __func__));
   15312 
   15313 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15314 	do {
   15315 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15316 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15317 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15318 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15319 			break;
   15320 		delay(2*1000);
   15321 		i++;
   15322 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15323 
   15324 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15325 		wm_put_hw_semaphore_82573(sc);
   15326 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15327 		    device_xname(sc->sc_dev));
   15328 		return -1;
   15329 	}
   15330 
   15331 	return 0;
   15332 }
   15333 
   15334 static void
   15335 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15336 {
   15337 	uint32_t reg;
   15338 
   15339 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15340 		device_xname(sc->sc_dev), __func__));
   15341 
   15342 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15343 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15344 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15345 }
   15346 
   15347 /*
   15348  * Management mode and power management related subroutines.
   15349  * BMC, AMT, suspend/resume and EEE.
   15350  */
   15351 
   15352 #ifdef WM_WOL
   15353 static int
   15354 wm_check_mng_mode(struct wm_softc *sc)
   15355 {
   15356 	int rv;
   15357 
   15358 	switch (sc->sc_type) {
   15359 	case WM_T_ICH8:
   15360 	case WM_T_ICH9:
   15361 	case WM_T_ICH10:
   15362 	case WM_T_PCH:
   15363 	case WM_T_PCH2:
   15364 	case WM_T_PCH_LPT:
   15365 	case WM_T_PCH_SPT:
   15366 	case WM_T_PCH_CNP:
   15367 		rv = wm_check_mng_mode_ich8lan(sc);
   15368 		break;
   15369 	case WM_T_82574:
   15370 	case WM_T_82583:
   15371 		rv = wm_check_mng_mode_82574(sc);
   15372 		break;
   15373 	case WM_T_82571:
   15374 	case WM_T_82572:
   15375 	case WM_T_82573:
   15376 	case WM_T_80003:
   15377 		rv = wm_check_mng_mode_generic(sc);
   15378 		break;
   15379 	default:
15380 		/* Nothing to do */
   15381 		rv = 0;
   15382 		break;
   15383 	}
   15384 
   15385 	return rv;
   15386 }
   15387 
   15388 static int
   15389 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15390 {
   15391 	uint32_t fwsm;
   15392 
   15393 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15394 
   15395 	if (((fwsm & FWSM_FW_VALID) != 0)
   15396 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15397 		return 1;
   15398 
   15399 	return 0;
   15400 }
   15401 
   15402 static int
   15403 wm_check_mng_mode_82574(struct wm_softc *sc)
   15404 {
   15405 	uint16_t data;
   15406 
   15407 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15408 
   15409 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15410 		return 1;
   15411 
   15412 	return 0;
   15413 }
   15414 
   15415 static int
   15416 wm_check_mng_mode_generic(struct wm_softc *sc)
   15417 {
   15418 	uint32_t fwsm;
   15419 
   15420 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15421 
   15422 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15423 		return 1;
   15424 
   15425 	return 0;
   15426 }
   15427 #endif /* WM_WOL */
   15428 
   15429 static int
   15430 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15431 {
   15432 	uint32_t manc, fwsm, factps;
   15433 
   15434 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15435 		return 0;
   15436 
   15437 	manc = CSR_READ(sc, WMREG_MANC);
   15438 
   15439 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15440 		device_xname(sc->sc_dev), manc));
   15441 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15442 		return 0;
   15443 
   15444 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15445 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15446 		factps = CSR_READ(sc, WMREG_FACTPS);
   15447 		if (((factps & FACTPS_MNGCG) == 0)
   15448 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15449 			return 1;
15450 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   15451 		uint16_t data;
   15452 
   15453 		factps = CSR_READ(sc, WMREG_FACTPS);
   15454 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15455 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15456 			device_xname(sc->sc_dev), factps, data));
   15457 		if (((factps & FACTPS_MNGCG) == 0)
   15458 		    && ((data & NVM_CFG2_MNGM_MASK)
   15459 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15460 			return 1;
   15461 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15462 	    && ((manc & MANC_ASF_EN) == 0))
   15463 		return 1;
   15464 
   15465 	return 0;
   15466 }
   15467 
   15468 static bool
   15469 wm_phy_resetisblocked(struct wm_softc *sc)
   15470 {
   15471 	bool blocked = false;
   15472 	uint32_t reg;
   15473 	int i = 0;
   15474 
   15475 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15476 		device_xname(sc->sc_dev), __func__));
   15477 
   15478 	switch (sc->sc_type) {
   15479 	case WM_T_ICH8:
   15480 	case WM_T_ICH9:
   15481 	case WM_T_ICH10:
   15482 	case WM_T_PCH:
   15483 	case WM_T_PCH2:
   15484 	case WM_T_PCH_LPT:
   15485 	case WM_T_PCH_SPT:
   15486 	case WM_T_PCH_CNP:
   15487 		do {
   15488 			reg = CSR_READ(sc, WMREG_FWSM);
   15489 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15490 				blocked = true;
   15491 				delay(10*1000);
   15492 				continue;
   15493 			}
   15494 			blocked = false;
   15495 		} while (blocked && (i++ < 30));
   15496 		return blocked;
   15498 	case WM_T_82571:
   15499 	case WM_T_82572:
   15500 	case WM_T_82573:
   15501 	case WM_T_82574:
   15502 	case WM_T_82583:
   15503 	case WM_T_80003:
   15504 		reg = CSR_READ(sc, WMREG_MANC);
15505 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   15510 	default:
   15511 		/* No problem */
   15512 		break;
   15513 	}
   15514 
   15515 	return false;
   15516 }
   15517 
   15518 static void
   15519 wm_get_hw_control(struct wm_softc *sc)
   15520 {
   15521 	uint32_t reg;
   15522 
   15523 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15524 		device_xname(sc->sc_dev), __func__));
   15525 
   15526 	if (sc->sc_type == WM_T_82573) {
   15527 		reg = CSR_READ(sc, WMREG_SWSM);
   15528 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15529 	} else if (sc->sc_type >= WM_T_82571) {
   15530 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15531 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15532 	}
   15533 }
   15534 
   15535 static void
   15536 wm_release_hw_control(struct wm_softc *sc)
   15537 {
   15538 	uint32_t reg;
   15539 
   15540 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15541 		device_xname(sc->sc_dev), __func__));
   15542 
   15543 	if (sc->sc_type == WM_T_82573) {
   15544 		reg = CSR_READ(sc, WMREG_SWSM);
   15545 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15546 	} else if (sc->sc_type >= WM_T_82571) {
   15547 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15548 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15549 	}
   15550 }
   15551 
   15552 static void
   15553 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15554 {
   15555 	uint32_t reg;
   15556 
   15557 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15558 		device_xname(sc->sc_dev), __func__));
   15559 
   15560 	if (sc->sc_type < WM_T_PCH2)
   15561 		return;
   15562 
   15563 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15564 
   15565 	if (gate)
   15566 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15567 	else
   15568 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15569 
   15570 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15571 }
   15572 
   15573 static int
   15574 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15575 {
   15576 	uint32_t fwsm, reg;
   15577 	int rv;
   15578 
   15579 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15580 		device_xname(sc->sc_dev), __func__));
   15581 
   15582 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15583 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15584 
   15585 	/* Disable ULP */
   15586 	wm_ulp_disable(sc);
   15587 
   15588 	/* Acquire PHY semaphore */
   15589 	rv = sc->phy.acquire(sc);
   15590 	if (rv != 0) {
   15591 		DPRINTF(sc, WM_DEBUG_INIT,
   15592 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15593 		return rv;
   15594 	}
   15595 
   15596 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15597 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15598 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15599 	 */
   15600 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15601 	switch (sc->sc_type) {
   15602 	case WM_T_PCH_LPT:
   15603 	case WM_T_PCH_SPT:
   15604 	case WM_T_PCH_CNP:
   15605 		if (wm_phy_is_accessible_pchlan(sc))
   15606 			break;
   15607 
   15608 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15609 		 * forcing MAC to SMBus mode first.
   15610 		 */
   15611 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15612 		reg |= CTRL_EXT_FORCE_SMBUS;
   15613 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15614 #if 0
   15615 		/* XXX Isn't this required??? */
   15616 		CSR_WRITE_FLUSH(sc);
   15617 #endif
   15618 		/* Wait 50 milliseconds for MAC to finish any retries
   15619 		 * that it might be trying to perform from previous
   15620 		 * attempts to acknowledge any phy read requests.
   15621 		 */
   15622 		delay(50 * 1000);
   15623 		/* FALLTHROUGH */
   15624 	case WM_T_PCH2:
   15625 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15626 			break;
   15627 		/* FALLTHROUGH */
   15628 	case WM_T_PCH:
15629 		if ((sc->sc_type == WM_T_PCH)
15630 		    && ((fwsm & FWSM_FW_VALID) != 0))
15631 			break;
   15632 
   15633 		if (wm_phy_resetisblocked(sc) == true) {
   15634 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15635 			break;
   15636 		}
   15637 
   15638 		/* Toggle LANPHYPC Value bit */
   15639 		wm_toggle_lanphypc_pch_lpt(sc);
   15640 
   15641 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15642 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15643 				break;
   15644 
   15645 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15646 			 * so ensure that the MAC is also out of SMBus mode
   15647 			 */
   15648 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15649 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15650 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15651 
   15652 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15653 				break;
   15654 			rv = -1;
   15655 		}
   15656 		break;
   15657 	default:
   15658 		break;
   15659 	}
   15660 
   15661 	/* Release semaphore */
   15662 	sc->phy.release(sc);
   15663 
   15664 	if (rv == 0) {
   15665 		/* Check to see if able to reset PHY.  Print error if not */
   15666 		if (wm_phy_resetisblocked(sc)) {
   15667 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15668 			goto out;
   15669 		}
   15670 
   15671 		/* Reset the PHY before any access to it.  Doing so, ensures
   15672 		 * that the PHY is in a known good state before we read/write
   15673 		 * PHY registers.  The generic reset is sufficient here,
   15674 		 * because we haven't determined the PHY type yet.
   15675 		 */
   15676 		if (wm_reset_phy(sc) != 0)
   15677 			goto out;
   15678 
15679 		/* On a successful reset, possibly need to wait for the PHY
15680 		 * to quiesce to an accessible state before returning control
15681 		 * to the calling function.  If the PHY does not quiesce,
15682 		 * report that reset is still blocked, as this is the
15683 		 * condition the PHY is in.
15684 		 */
   15685 		if (wm_phy_resetisblocked(sc))
   15686 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15687 	}
   15688 
   15689 out:
   15690 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15691 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15692 		delay(10*1000);
   15693 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15694 	}
   15695 
   15696 	return 0;
   15697 }
   15698 
   15699 static void
   15700 wm_init_manageability(struct wm_softc *sc)
   15701 {
   15702 
   15703 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15704 		device_xname(sc->sc_dev), __func__));
   15705 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15706 
   15707 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15708 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15709 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15710 
   15711 		/* Disable hardware interception of ARP */
   15712 		manc &= ~MANC_ARP_EN;
   15713 
   15714 		/* Enable receiving management packets to the host */
   15715 		if (sc->sc_type >= WM_T_82571) {
   15716 			manc |= MANC_EN_MNG2HOST;
   15717 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15718 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15719 		}
   15720 
   15721 		CSR_WRITE(sc, WMREG_MANC, manc);
   15722 	}
   15723 }
   15724 
   15725 static void
   15726 wm_release_manageability(struct wm_softc *sc)
   15727 {
   15728 
   15729 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15730 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15731 
   15732 		manc |= MANC_ARP_EN;
   15733 		if (sc->sc_type >= WM_T_82571)
   15734 			manc &= ~MANC_EN_MNG2HOST;
   15735 
   15736 		CSR_WRITE(sc, WMREG_MANC, manc);
   15737 	}
   15738 }
   15739 
   15740 static void
   15741 wm_get_wakeup(struct wm_softc *sc)
   15742 {
   15743 
   15744 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15745 	switch (sc->sc_type) {
   15746 	case WM_T_82573:
   15747 	case WM_T_82583:
   15748 		sc->sc_flags |= WM_F_HAS_AMT;
   15749 		/* FALLTHROUGH */
   15750 	case WM_T_80003:
   15751 	case WM_T_82575:
   15752 	case WM_T_82576:
   15753 	case WM_T_82580:
   15754 	case WM_T_I350:
   15755 	case WM_T_I354:
   15756 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15757 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15758 		/* FALLTHROUGH */
   15759 	case WM_T_82541:
   15760 	case WM_T_82541_2:
   15761 	case WM_T_82547:
   15762 	case WM_T_82547_2:
   15763 	case WM_T_82571:
   15764 	case WM_T_82572:
   15765 	case WM_T_82574:
   15766 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15767 		break;
   15768 	case WM_T_ICH8:
   15769 	case WM_T_ICH9:
   15770 	case WM_T_ICH10:
   15771 	case WM_T_PCH:
   15772 	case WM_T_PCH2:
   15773 	case WM_T_PCH_LPT:
   15774 	case WM_T_PCH_SPT:
   15775 	case WM_T_PCH_CNP:
   15776 		sc->sc_flags |= WM_F_HAS_AMT;
   15777 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15778 		break;
   15779 	default:
   15780 		break;
   15781 	}
   15782 
   15783 	/* 1: HAS_MANAGE */
   15784 	if (wm_enable_mng_pass_thru(sc) != 0)
   15785 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15786 
15787 	/*
15788 	 * Note that the WOL flags are set after the EEPROM-related
15789 	 * resetting is done.
15790 	 */
   15791 }
   15792 
   15793 /*
   15794  * Unconfigure Ultra Low Power mode.
   15795  * Only for I217 and newer (see below).
   15796  */
   15797 static int
   15798 wm_ulp_disable(struct wm_softc *sc)
   15799 {
   15800 	uint32_t reg;
   15801 	uint16_t phyreg;
   15802 	int i = 0, rv;
   15803 
   15804 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15805 		device_xname(sc->sc_dev), __func__));
   15806 	/* Exclude old devices */
   15807 	if ((sc->sc_type < WM_T_PCH_LPT)
   15808 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15809 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15810 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15811 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15812 		return 0;
   15813 
   15814 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15815 		/* Request ME un-configure ULP mode in the PHY */
   15816 		reg = CSR_READ(sc, WMREG_H2ME);
   15817 		reg &= ~H2ME_ULP;
   15818 		reg |= H2ME_ENFORCE_SETTINGS;
   15819 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15820 
   15821 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15822 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15823 			if (i++ == 30) {
   15824 				device_printf(sc->sc_dev, "%s timed out\n",
   15825 				    __func__);
   15826 				return -1;
   15827 			}
   15828 			delay(10 * 1000);
   15829 		}
   15830 		reg = CSR_READ(sc, WMREG_H2ME);
   15831 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15832 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15833 
   15834 		return 0;
   15835 	}
   15836 
   15837 	/* Acquire semaphore */
   15838 	rv = sc->phy.acquire(sc);
   15839 	if (rv != 0) {
   15840 		DPRINTF(sc, WM_DEBUG_INIT,
   15841 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15842 		return rv;
   15843 	}
   15844 
   15845 	/* Toggle LANPHYPC */
   15846 	wm_toggle_lanphypc_pch_lpt(sc);
   15847 
   15848 	/* Unforce SMBus mode in PHY */
   15849 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15850 	if (rv != 0) {
   15851 		uint32_t reg2;
   15852 
   15853 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15854 		    __func__);
   15855 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15856 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15857 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15858 		delay(50 * 1000);
   15859 
   15860 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15861 		    &phyreg);
   15862 		if (rv != 0)
   15863 			goto release;
   15864 	}
   15865 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15866 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15867 
   15868 	/* Unforce SMBus mode in MAC */
   15869 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15870 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15871 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15872 
   15873 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15874 	if (rv != 0)
   15875 		goto release;
   15876 	phyreg |= HV_PM_CTRL_K1_ENA;
   15877 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15878 
   15879 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15880 	    &phyreg);
   15881 	if (rv != 0)
   15882 		goto release;
   15883 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15884 	    | I218_ULP_CONFIG1_STICKY_ULP
   15885 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15886 	    | I218_ULP_CONFIG1_WOL_HOST
   15887 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15888 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15889 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15890 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15891 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15892 	phyreg |= I218_ULP_CONFIG1_START;
   15893 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15894 
   15895 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15896 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15897 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15898 
   15899 release:
   15900 	/* Release semaphore */
   15901 	sc->phy.release(sc);
   15902 	wm_gmii_reset(sc);
   15903 	delay(50 * 1000);
   15904 
   15905 	return rv;
   15906 }
   15907 
   15908 /* WOL in the newer chipset interfaces (pchlan) */
   15909 static int
   15910 wm_enable_phy_wakeup(struct wm_softc *sc)
   15911 {
   15912 	device_t dev = sc->sc_dev;
   15913 	uint32_t mreg, moff;
   15914 	uint16_t wuce, wuc, wufc, preg;
   15915 	int i, rv;
   15916 
   15917 	KASSERT(sc->sc_type >= WM_T_PCH);
   15918 
   15919 	/* Copy MAC RARs to PHY RARs */
   15920 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15921 
   15922 	/* Activate PHY wakeup */
   15923 	rv = sc->phy.acquire(sc);
   15924 	if (rv != 0) {
   15925 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15926 		    __func__);
   15927 		return rv;
   15928 	}
   15929 
   15930 	/*
   15931 	 * Enable access to PHY wakeup registers.
   15932 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15933 	 */
   15934 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15935 	if (rv != 0) {
   15936 		device_printf(dev,
   15937 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15938 		goto release;
   15939 	}
   15940 
   15941 	/* Copy MAC MTA to PHY MTA */
   15942 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15943 		uint16_t lo, hi;
   15944 
   15945 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15946 		lo = (uint16_t)(mreg & 0xffff);
   15947 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15948 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15949 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15950 	}
   15951 
   15952 	/* Configure PHY Rx Control register */
   15953 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15954 	mreg = CSR_READ(sc, WMREG_RCTL);
   15955 	if (mreg & RCTL_UPE)
   15956 		preg |= BM_RCTL_UPE;
   15957 	if (mreg & RCTL_MPE)
   15958 		preg |= BM_RCTL_MPE;
   15959 	preg &= ~(BM_RCTL_MO_MASK);
   15960 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15961 	if (moff != 0)
   15962 		preg |= moff << BM_RCTL_MO_SHIFT;
   15963 	if (mreg & RCTL_BAM)
   15964 		preg |= BM_RCTL_BAM;
   15965 	if (mreg & RCTL_PMCF)
   15966 		preg |= BM_RCTL_PMCF;
   15967 	mreg = CSR_READ(sc, WMREG_CTRL);
   15968 	if (mreg & CTRL_RFCE)
   15969 		preg |= BM_RCTL_RFCE;
   15970 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15971 
   15972 	wuc = WUC_APME | WUC_PME_EN;
   15973 	wufc = WUFC_MAG;
   15974 	/* Enable PHY wakeup in MAC register */
   15975 	CSR_WRITE(sc, WMREG_WUC,
   15976 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15977 	CSR_WRITE(sc, WMREG_WUFC, wufc);
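          	/*
          	 * WUC_PHY_WAKE hands wake packet detection to the PHY; the
          	 * MAC-side WUC/WUFC writes above arm PME, while the matching
          	 * wakeup filters are programmed into the PHY's BM_WUC/BM_WUFC
          	 * below.
          	 */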
   15978 
   15979 	/* Configure and enable PHY wakeup in PHY registers */
   15980 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15981 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15982 
   15983 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15984 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15985 
   15986 release:
   15987 	sc->phy.release(sc);
   15988 
    15989 	return rv;
   15990 }
   15991 
   15992 /* Power down workaround on D3 */
   15993 static void
   15994 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15995 {
   15996 	uint32_t reg;
   15997 	uint16_t phyreg;
   15998 	int i;
   15999 
   16000 	for (i = 0; i < 2; i++) {
   16001 		/* Disable link */
   16002 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16003 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16004 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16005 
   16006 		/*
   16007 		 * Call gig speed drop workaround on Gig disable before
   16008 		 * accessing any PHY registers
   16009 		 */
   16010 		if (sc->sc_type == WM_T_ICH8)
   16011 			wm_gig_downshift_workaround_ich8lan(sc);
   16012 
   16013 		/* Write VR power-down enable */
   16014 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16015 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16016 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16017 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16018 
   16019 		/* Read it back and test */
   16020 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16021 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16022 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16023 			break;
   16024 
   16025 		/* Issue PHY reset and repeat at most one more time */
   16026 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16027 	}
   16028 }
   16029 
   16030 /*
   16031  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16032  *  @sc: pointer to the HW structure
   16033  *
   16034  *  During S0 to Sx transition, it is possible the link remains at gig
   16035  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16036  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16037  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16038  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16039  *  needs to be written.
    16040  *  Parts that support (and are linked to a partner which supports) EEE in
   16041  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16042  *  than 10Mbps w/o EEE.
   16043  */
   16044 static void
   16045 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16046 {
   16047 	device_t dev = sc->sc_dev;
   16048 	struct ethercom *ec = &sc->sc_ethercom;
   16049 	uint32_t phy_ctrl;
   16050 	int rv;
   16051 
   16052 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16053 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16054 
   16055 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16056 
   16057 	if (sc->sc_phytype == WMPHY_I217) {
   16058 		uint16_t devid = sc->sc_pcidevid;
   16059 
   16060 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16061 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16062 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16063 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16064 		    (sc->sc_type >= WM_T_PCH_SPT))
   16065 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16066 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16067 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16068 
   16069 		if (sc->phy.acquire(sc) != 0)
   16070 			goto out;
   16071 
   16072 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16073 			uint16_t eee_advert;
   16074 
   16075 			rv = wm_read_emi_reg_locked(dev,
   16076 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16077 			if (rv)
   16078 				goto release;
   16079 
   16080 			/*
   16081 			 * Disable LPLU if both link partners support 100BaseT
   16082 			 * EEE and 100Full is advertised on both ends of the
   16083 			 * link, and enable Auto Enable LPI since there will
   16084 			 * be no driver to enable LPI while in Sx.
   16085 			 */
   16086 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16087 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16088 				uint16_t anar, phy_reg;
   16089 
   16090 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16091 				    &anar);
   16092 				if (anar & ANAR_TX_FD) {
   16093 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16094 					    PHY_CTRL_NOND0A_LPLU);
   16095 
   16096 					/* Set Auto Enable LPI after link up */
   16097 					sc->phy.readreg_locked(dev, 2,
   16098 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16099 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16100 					sc->phy.writereg_locked(dev, 2,
   16101 					    I217_LPI_GPIO_CTRL, phy_reg);
   16102 				}
   16103 			}
   16104 		}
   16105 
   16106 		/*
   16107 		 * For i217 Intel Rapid Start Technology support,
   16108 		 * when the system is going into Sx and no manageability engine
   16109 		 * is present, the driver must configure proxy to reset only on
   16110 		 * power good.	LPI (Low Power Idle) state must also reset only
   16111 		 * on power good, as well as the MTA (Multicast table array).
   16112 		 * The SMBus release must also be disabled on LCD reset.
   16113 		 */
   16114 
   16115 		/*
   16116 		 * Enable MTA to reset for Intel Rapid Start Technology
   16117 		 * Support
   16118 		 */
   16119 
   16120 release:
   16121 		sc->phy.release(sc);
   16122 	}
   16123 out:
   16124 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16125 
   16126 	if (sc->sc_type == WM_T_ICH8)
   16127 		wm_gig_downshift_workaround_ich8lan(sc);
   16128 
   16129 	if (sc->sc_type >= WM_T_PCH) {
   16130 		wm_oem_bits_config_ich8lan(sc, false);
   16131 
   16132 		/* Reset PHY to activate OEM bits on 82577/8 */
   16133 		if (sc->sc_type == WM_T_PCH)
   16134 			wm_reset_phy(sc);
   16135 
   16136 		if (sc->phy.acquire(sc) != 0)
   16137 			return;
   16138 		wm_write_smbus_addr(sc);
   16139 		sc->phy.release(sc);
   16140 	}
   16141 }
   16142 
   16143 /*
   16144  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16145  *  @sc: pointer to the HW structure
   16146  *
   16147  *  During Sx to S0 transitions on non-managed devices or managed devices
   16148  *  on which PHY resets are not blocked, if the PHY registers cannot be
    16149  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16150  *  the PHY.
   16151  *  On i217, setup Intel Rapid Start Technology.
   16152  */
   16153 static int
   16154 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16155 {
   16156 	device_t dev = sc->sc_dev;
   16157 	int rv;
   16158 
   16159 	if (sc->sc_type < WM_T_PCH2)
   16160 		return 0;
   16161 
   16162 	rv = wm_init_phy_workarounds_pchlan(sc);
   16163 	if (rv != 0)
   16164 		return rv;
   16165 
   16166 	/* For i217 Intel Rapid Start Technology support when the system
    16167 	 * is transitioning from Sx and no manageability engine is present,
   16168 	 * configure SMBus to restore on reset, disable proxy, and enable
   16169 	 * the reset on MTA (Multicast table array).
   16170 	 */
   16171 	if (sc->sc_phytype == WMPHY_I217) {
   16172 		uint16_t phy_reg;
   16173 
   16174 		rv = sc->phy.acquire(sc);
   16175 		if (rv != 0)
   16176 			return rv;
   16177 
   16178 		/* Clear Auto Enable LPI after link up */
   16179 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16180 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16181 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16182 
   16183 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16184 			/* Restore clear on SMB if no manageability engine
   16185 			 * is present
   16186 			 */
   16187 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16188 			    &phy_reg);
   16189 			if (rv != 0)
   16190 				goto release;
   16191 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16192 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16193 
   16194 			/* Disable Proxy */
   16195 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16196 		}
   16197 		/* Enable reset on MTA */
    16198 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16199 		if (rv != 0)
   16200 			goto release;
   16201 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16202 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16203 
   16204 release:
   16205 		sc->phy.release(sc);
   16206 		return rv;
   16207 	}
   16208 
   16209 	return 0;
   16210 }
   16211 
   16212 static void
   16213 wm_enable_wakeup(struct wm_softc *sc)
   16214 {
   16215 	uint32_t reg, pmreg;
   16216 	pcireg_t pmode;
   16217 	int rv = 0;
   16218 
   16219 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16220 		device_xname(sc->sc_dev), __func__));
   16221 
   16222 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16223 	    &pmreg, NULL) == 0)
   16224 		return;
   16225 
   16226 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16227 		goto pme;
   16228 
   16229 	/* Advertise the wakeup capability */
   16230 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16231 	    | CTRL_SWDPIN(3));
   16232 
   16233 	/* Keep the laser running on fiber adapters */
   16234 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16235 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16236 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16237 		reg |= CTRL_EXT_SWDPIN(3);
   16238 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16239 	}
   16240 
   16241 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16242 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16243 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16244 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16245 		wm_suspend_workarounds_ich8lan(sc);
   16246 
   16247 #if 0	/* For the multicast packet */
   16248 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16249 	reg |= WUFC_MC;
   16250 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16251 #endif
   16252 
   16253 	if (sc->sc_type >= WM_T_PCH) {
   16254 		rv = wm_enable_phy_wakeup(sc);
   16255 		if (rv != 0)
   16256 			goto pme;
   16257 	} else {
   16258 		/* Enable wakeup by the MAC */
   16259 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16260 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16261 	}
   16262 
   16263 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16264 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16265 		|| (sc->sc_type == WM_T_PCH2))
   16266 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16267 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16268 
   16269 pme:
   16270 	/* Request PME */
   16271 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16272 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16273 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16274 		/* For WOL */
   16275 		pmode |= PCI_PMCSR_PME_EN;
   16276 	} else {
   16277 		/* Disable WOL */
   16278 		pmode &= ~PCI_PMCSR_PME_EN;
   16279 	}
   16280 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16281 }
   16282 
   16283 /* Disable ASPM L0s and/or L1 for workaround */
   16284 static void
   16285 wm_disable_aspm(struct wm_softc *sc)
   16286 {
   16287 	pcireg_t reg, mask = 0;
    16288 	const char *str = "";
   16289 
   16290 	/*
    16291 	 * Only for PCIe devices which have the PCIe capability in the PCI
    16292 	 * config space.
   16293 	 */
   16294 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16295 		return;
   16296 
   16297 	switch (sc->sc_type) {
   16298 	case WM_T_82571:
   16299 	case WM_T_82572:
   16300 		/*
   16301 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16302 		 * State Power management L1 State (ASPM L1).
   16303 		 */
   16304 		mask = PCIE_LCSR_ASPM_L1;
   16305 		str = "L1 is";
   16306 		break;
   16307 	case WM_T_82573:
   16308 	case WM_T_82574:
   16309 	case WM_T_82583:
   16310 		/*
   16311 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16312 		 *
    16313 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16314 		 * some chipsets.  The documents for the 82574 and 82583 say
    16315 		 * that disabling L0s with some specific chipsets is
    16316 		 * sufficient, but we follow what the Intel em driver does.
   16317 		 *
   16318 		 * References:
   16319 		 * Errata 8 of the Specification Update of i82573.
   16320 		 * Errata 20 of the Specification Update of i82574.
   16321 		 * Errata 9 of the Specification Update of i82583.
   16322 		 */
   16323 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16324 		str = "L0s and L1 are";
   16325 		break;
   16326 	default:
   16327 		return;
   16328 	}
   16329 
   16330 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16331 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16332 	reg &= ~mask;
   16333 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16334 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16335 
   16336 	/* Print only in wm_attach() */
   16337 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16338 		aprint_verbose_dev(sc->sc_dev,
    16339 		    "ASPM %s disabled to work around the errata.\n", str);
   16340 }
   16341 
   16342 /* LPLU */
   16343 
   16344 static void
   16345 wm_lplu_d0_disable(struct wm_softc *sc)
   16346 {
   16347 	struct mii_data *mii = &sc->sc_mii;
   16348 	uint32_t reg;
   16349 	uint16_t phyval;
   16350 
   16351 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16352 		device_xname(sc->sc_dev), __func__));
   16353 
   16354 	if (sc->sc_phytype == WMPHY_IFE)
   16355 		return;
   16356 
   16357 	switch (sc->sc_type) {
   16358 	case WM_T_82571:
   16359 	case WM_T_82572:
   16360 	case WM_T_82573:
   16361 	case WM_T_82575:
   16362 	case WM_T_82576:
   16363 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16364 		phyval &= ~PMR_D0_LPLU;
   16365 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16366 		break;
   16367 	case WM_T_82580:
   16368 	case WM_T_I350:
   16369 	case WM_T_I210:
   16370 	case WM_T_I211:
   16371 		reg = CSR_READ(sc, WMREG_PHPM);
   16372 		reg &= ~PHPM_D0A_LPLU;
   16373 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16374 		break;
   16375 	case WM_T_82574:
   16376 	case WM_T_82583:
   16377 	case WM_T_ICH8:
   16378 	case WM_T_ICH9:
   16379 	case WM_T_ICH10:
   16380 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16381 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16382 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16383 		CSR_WRITE_FLUSH(sc);
   16384 		break;
   16385 	case WM_T_PCH:
   16386 	case WM_T_PCH2:
   16387 	case WM_T_PCH_LPT:
   16388 	case WM_T_PCH_SPT:
   16389 	case WM_T_PCH_CNP:
   16390 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16391 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16392 		if (wm_phy_resetisblocked(sc) == false)
   16393 			phyval |= HV_OEM_BITS_ANEGNOW;
   16394 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16395 		break;
   16396 	default:
   16397 		break;
   16398 	}
   16399 }
   16400 
   16401 /* EEE */
   16402 
   16403 static int
   16404 wm_set_eee_i350(struct wm_softc *sc)
   16405 {
   16406 	struct ethercom *ec = &sc->sc_ethercom;
   16407 	uint32_t ipcnfg, eeer;
   16408 	uint32_t ipcnfg_mask
   16409 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16410 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16411 
   16412 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16413 
   16414 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16415 	eeer = CSR_READ(sc, WMREG_EEER);
   16416 
   16417 	/* Enable or disable per user setting */
   16418 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16419 		ipcnfg |= ipcnfg_mask;
   16420 		eeer |= eeer_mask;
   16421 	} else {
   16422 		ipcnfg &= ~ipcnfg_mask;
   16423 		eeer &= ~eeer_mask;
   16424 	}
   16425 
   16426 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16427 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16428 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16429 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16430 
   16431 	return 0;
   16432 }
   16433 
   16434 static int
   16435 wm_set_eee_pchlan(struct wm_softc *sc)
   16436 {
   16437 	device_t dev = sc->sc_dev;
   16438 	struct ethercom *ec = &sc->sc_ethercom;
   16439 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16440 	int rv;
   16441 
   16442 	switch (sc->sc_phytype) {
   16443 	case WMPHY_82579:
   16444 		lpa = I82579_EEE_LP_ABILITY;
   16445 		pcs_status = I82579_EEE_PCS_STATUS;
   16446 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16447 		break;
   16448 	case WMPHY_I217:
   16449 		lpa = I217_EEE_LP_ABILITY;
   16450 		pcs_status = I217_EEE_PCS_STATUS;
   16451 		adv_addr = I217_EEE_ADVERTISEMENT;
   16452 		break;
   16453 	default:
   16454 		return 0;
   16455 	}
   16456 
   16457 	rv = sc->phy.acquire(sc);
   16458 	if (rv != 0) {
   16459 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16460 		return rv;
   16461 	}
   16462 
   16463 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16464 	if (rv != 0)
   16465 		goto release;
   16466 
   16467 	/* Clear bits that enable EEE in various speeds */
   16468 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16469 
   16470 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16471 		/* Save off link partner's EEE ability */
   16472 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16473 		if (rv != 0)
   16474 			goto release;
   16475 
   16476 		/* Read EEE advertisement */
   16477 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16478 			goto release;
   16479 
   16480 		/*
   16481 		 * Enable EEE only for speeds in which the link partner is
   16482 		 * EEE capable and for which we advertise EEE.
   16483 		 */
   16484 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16485 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16486 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16487 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16488 			if ((data & ANLPAR_TX_FD) != 0)
   16489 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16490 			else {
   16491 				/*
   16492 				 * EEE is not supported in 100Half, so ignore
   16493 				 * partner's EEE in 100 ability if full-duplex
   16494 				 * is not advertised.
   16495 				 */
   16496 				sc->eee_lp_ability
   16497 				    &= ~AN_EEEADVERT_100_TX;
   16498 			}
   16499 		}
   16500 	}
   16501 
   16502 	if (sc->sc_phytype == WMPHY_82579) {
   16503 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16504 		if (rv != 0)
   16505 			goto release;
   16506 
   16507 		data &= ~I82579_LPI_PLL_SHUT_100;
   16508 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16509 	}
   16510 
   16511 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16512 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16513 		goto release;
   16514 
   16515 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16516 release:
   16517 	sc->phy.release(sc);
   16518 
   16519 	return rv;
   16520 }
   16521 
   16522 static int
   16523 wm_set_eee(struct wm_softc *sc)
   16524 {
   16525 	struct ethercom *ec = &sc->sc_ethercom;
   16526 
   16527 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16528 		return 0;
   16529 
   16530 	if (sc->sc_type == WM_T_I354) {
   16531 		/* I354 uses an external PHY */
   16532 		return 0; /* not yet */
   16533 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16534 		return wm_set_eee_i350(sc);
   16535 	else if (sc->sc_type >= WM_T_PCH2)
   16536 		return wm_set_eee_pchlan(sc);
   16537 
   16538 	return 0;
   16539 }
   16540 
   16541 /*
   16542  * Workarounds (mainly PHY related).
   16543  * Basically, PHY's workarounds are in the PHY drivers.
   16544  */
   16545 
   16546 /* Workaround for 82566 Kumeran PCS lock loss */
   16547 static int
   16548 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16549 {
   16550 	struct mii_data *mii = &sc->sc_mii;
   16551 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16552 	int i, reg, rv;
   16553 	uint16_t phyreg;
   16554 
   16555 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16556 		device_xname(sc->sc_dev), __func__));
   16557 
   16558 	/* If the link is not up, do nothing */
   16559 	if ((status & STATUS_LU) == 0)
   16560 		return 0;
   16561 
   16562 	/* Nothing to do if the link is other than 1Gbps */
   16563 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16564 		return 0;
   16565 
   16566 	for (i = 0; i < 10; i++) {
   16567 		/* read twice */
   16568 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16569 		if (rv != 0)
   16570 			return rv;
   16571 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16572 		if (rv != 0)
   16573 			return rv;
   16574 
   16575 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16576 			goto out;	/* GOOD! */
   16577 
   16578 		/* Reset the PHY */
   16579 		wm_reset_phy(sc);
    16580 		delay(5 * 1000);
   16581 	}
   16582 
   16583 	/* Disable GigE link negotiation */
   16584 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16585 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16586 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16587 
   16588 	/*
   16589 	 * Call gig speed drop workaround on Gig disable before accessing
   16590 	 * any PHY registers.
   16591 	 */
   16592 	wm_gig_downshift_workaround_ich8lan(sc);
   16593 
   16594 out:
   16595 	return 0;
   16596 }
   16597 
   16598 /*
   16599  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16600  *  @sc: pointer to the HW structure
   16601  *
    16602  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16603  *  LPLU, Gig disable, MDIC PHY reset):
   16604  *    1) Set Kumeran Near-end loopback
   16605  *    2) Clear Kumeran Near-end loopback
   16606  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16607  */
   16608 static void
   16609 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16610 {
   16611 	uint16_t kmreg;
   16612 
   16613 	/* Only for igp3 */
   16614 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16615 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16616 			return;
   16617 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16618 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16619 			return;
   16620 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16621 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16622 	}
   16623 }
   16624 
   16625 /*
   16626  * Workaround for pch's PHYs
   16627  * XXX should be moved to new PHY driver?
   16628  */
   16629 static int
   16630 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16631 {
   16632 	device_t dev = sc->sc_dev;
   16633 	struct mii_data *mii = &sc->sc_mii;
   16634 	struct mii_softc *child;
   16635 	uint16_t phy_data, phyrev = 0;
   16636 	int phytype = sc->sc_phytype;
   16637 	int rv;
   16638 
   16639 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16640 		device_xname(dev), __func__));
   16641 	KASSERT(sc->sc_type == WM_T_PCH);
   16642 
   16643 	/* Set MDIO slow mode before any other MDIO access */
   16644 	if (phytype == WMPHY_82577)
   16645 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16646 			return rv;
   16647 
   16648 	child = LIST_FIRST(&mii->mii_phys);
   16649 	if (child != NULL)
   16650 		phyrev = child->mii_mpd_rev;
   16651 
    16652 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16653 	if ((child != NULL) &&
   16654 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16655 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16656 		/* Disable generation of early preamble (0x4431) */
   16657 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16658 		    &phy_data);
   16659 		if (rv != 0)
   16660 			return rv;
   16661 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16662 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16663 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16664 		    phy_data);
   16665 		if (rv != 0)
   16666 			return rv;
   16667 
   16668 		/* Preamble tuning for SSC */
   16669 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16670 		if (rv != 0)
   16671 			return rv;
   16672 	}
   16673 
   16674 	/* 82578 */
   16675 	if (phytype == WMPHY_82578) {
   16676 		/*
   16677 		 * Return registers to default by doing a soft reset then
   16678 		 * writing 0x3140 to the control register
   16679 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16680 		 */
   16681 		if ((child != NULL) && (phyrev < 2)) {
   16682 			PHY_RESET(child);
   16683 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16684 			if (rv != 0)
   16685 				return rv;
   16686 		}
   16687 	}
   16688 
   16689 	/* Select page 0 */
   16690 	if ((rv = sc->phy.acquire(sc)) != 0)
   16691 		return rv;
   16692 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16693 	sc->phy.release(sc);
   16694 	if (rv != 0)
   16695 		return rv;
   16696 
   16697 	/*
   16698 	 * Configure the K1 Si workaround during phy reset assuming there is
   16699 	 * link so that it disables K1 if link is in 1Gbps.
   16700 	 */
   16701 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16702 		return rv;
   16703 
   16704 	/* Workaround for link disconnects on a busy hub in half duplex */
   16705 	rv = sc->phy.acquire(sc);
   16706 	if (rv)
   16707 		return rv;
   16708 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16709 	if (rv)
   16710 		goto release;
   16711 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16712 	    phy_data & 0x00ff);
   16713 	if (rv)
   16714 		goto release;
   16715 
   16716 	/* Set MSE higher to enable link to stay up when noise is high */
   16717 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16718 release:
   16719 	sc->phy.release(sc);
   16720 
   16721 	return rv;
   16722 }
   16723 
   16724 /*
   16725  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16726  *  @sc:   pointer to the HW structure
   16727  */
   16728 static void
   16729 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16730 {
   16731 
   16732 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16733 		device_xname(sc->sc_dev), __func__));
   16734 
   16735 	if (sc->phy.acquire(sc) != 0)
   16736 		return;
   16737 
   16738 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16739 
   16740 	sc->phy.release(sc);
   16741 }
   16742 
   16743 static void
   16744 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16745 {
   16746 	device_t dev = sc->sc_dev;
   16747 	uint32_t mac_reg;
   16748 	uint16_t i, wuce;
   16749 	int count;
   16750 
   16751 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16752 		device_xname(dev), __func__));
   16753 
   16754 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16755 		return;
   16756 
   16757 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16758 	count = wm_rar_count(sc);
   16759 	for (i = 0; i < count; i++) {
   16760 		uint16_t lo, hi;
   16761 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16762 		lo = (uint16_t)(mac_reg & 0xffff);
   16763 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16764 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16765 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16766 
   16767 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16768 		lo = (uint16_t)(mac_reg & 0xffff);
   16769 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16770 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16771 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16772 	}
   16773 
   16774 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16775 }
   16776 
   16777 /*
   16778  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16779  *  with 82579 PHY
   16780  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16781  */
   16782 static int
   16783 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16784 {
   16785 	device_t dev = sc->sc_dev;
   16786 	int rar_count;
   16787 	int rv;
   16788 	uint32_t mac_reg;
   16789 	uint16_t dft_ctrl, data;
   16790 	uint16_t i;
   16791 
   16792 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16793 		device_xname(dev), __func__));
   16794 
   16795 	if (sc->sc_type < WM_T_PCH2)
   16796 		return 0;
   16797 
   16798 	/* Acquire PHY semaphore */
   16799 	rv = sc->phy.acquire(sc);
   16800 	if (rv != 0)
   16801 		return rv;
   16802 
   16803 	/* Disable Rx path while enabling/disabling workaround */
   16804 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16805 	if (rv != 0)
   16806 		goto out;
   16807 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16808 	    dft_ctrl | (1 << 14));
   16809 	if (rv != 0)
   16810 		goto out;
   16811 
   16812 	if (enable) {
   16813 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16814 		 * SHRAL/H) and initial CRC values to the MAC
   16815 		 */
   16816 		rar_count = wm_rar_count(sc);
   16817 		for (i = 0; i < rar_count; i++) {
   16818 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16819 			uint32_t addr_high, addr_low;
   16820 
   16821 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16822 			if (!(addr_high & RAL_AV))
   16823 				continue;
   16824 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16825 			mac_addr[0] = (addr_low & 0xFF);
   16826 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16827 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16828 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16829 			mac_addr[4] = (addr_high & 0xFF);
   16830 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16831 
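          			/*
          			 * The initial CRC value expected by the MAC is
          			 * the one's complement of the little-endian
          			 * CRC32 of the station address.
          			 */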
   16832 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16833 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16834 		}
   16835 
   16836 		/* Write Rx addresses to the PHY */
   16837 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16838 	}
   16839 
   16840 	/*
   16841 	 * If enable ==
   16842 	 *	true: Enable jumbo frame workaround in the MAC.
   16843 	 *	false: Write MAC register values back to h/w defaults.
   16844 	 */
   16845 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16846 	if (enable) {
   16847 		mac_reg &= ~(1 << 14);
   16848 		mac_reg |= (7 << 15);
   16849 	} else
   16850 		mac_reg &= ~(0xf << 14);
   16851 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16852 
   16853 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16854 	if (enable) {
   16855 		mac_reg |= RCTL_SECRC;
   16856 		sc->sc_rctl |= RCTL_SECRC;
   16857 		sc->sc_flags |= WM_F_CRC_STRIP;
   16858 	} else {
   16859 		mac_reg &= ~RCTL_SECRC;
   16860 		sc->sc_rctl &= ~RCTL_SECRC;
   16861 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16862 	}
   16863 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16864 
   16865 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16866 	if (rv != 0)
   16867 		goto out;
   16868 	if (enable)
   16869 		data |= 1 << 0;
   16870 	else
   16871 		data &= ~(1 << 0);
   16872 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16873 	if (rv != 0)
   16874 		goto out;
   16875 
   16876 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16877 	if (rv != 0)
   16878 		goto out;
   16879 	/*
    16880 	 * XXX Both FreeBSD and Linux write the same value in the enable
    16881 	 * case and the disable case. Is that correct?
   16882 	 */
   16883 	data &= ~(0xf << 8);
   16884 	data |= (0xb << 8);
   16885 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16886 	if (rv != 0)
   16887 		goto out;
   16888 
   16889 	/*
   16890 	 * If enable ==
   16891 	 *	true: Enable jumbo frame workaround in the PHY.
   16892 	 *	false: Write PHY register values back to h/w defaults.
   16893 	 */
   16894 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16895 	if (rv != 0)
   16896 		goto out;
   16897 	data &= ~(0x7F << 5);
   16898 	if (enable)
   16899 		data |= (0x37 << 5);
   16900 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16901 	if (rv != 0)
   16902 		goto out;
   16903 
   16904 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16905 	if (rv != 0)
   16906 		goto out;
   16907 	if (enable)
   16908 		data &= ~(1 << 13);
   16909 	else
   16910 		data |= (1 << 13);
   16911 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16912 	if (rv != 0)
   16913 		goto out;
   16914 
   16915 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16916 	if (rv != 0)
   16917 		goto out;
   16918 	data &= ~(0x3FF << 2);
   16919 	if (enable)
   16920 		data |= (I82579_TX_PTR_GAP << 2);
   16921 	else
   16922 		data |= (0x8 << 2);
   16923 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16924 	if (rv != 0)
   16925 		goto out;
   16926 
   16927 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16928 	    enable ? 0xf100 : 0x7e00);
   16929 	if (rv != 0)
   16930 		goto out;
   16931 
   16932 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16933 	if (rv != 0)
   16934 		goto out;
   16935 	if (enable)
   16936 		data |= 1 << 10;
   16937 	else
   16938 		data &= ~(1 << 10);
   16939 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16940 	if (rv != 0)
   16941 		goto out;
   16942 
   16943 	/* Re-enable Rx path after enabling/disabling workaround */
   16944 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16945 	    dft_ctrl & ~(1 << 14));
   16946 
   16947 out:
   16948 	sc->phy.release(sc);
   16949 
   16950 	return rv;
   16951 }
   16952 
   16953 /*
   16954  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16955  *  done after every PHY reset.
   16956  */
   16957 static int
   16958 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16959 {
   16960 	device_t dev = sc->sc_dev;
   16961 	int rv;
   16962 
   16963 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16964 		device_xname(dev), __func__));
   16965 	KASSERT(sc->sc_type == WM_T_PCH2);
   16966 
   16967 	/* Set MDIO slow mode before any other MDIO access */
   16968 	rv = wm_set_mdio_slow_mode_hv(sc);
   16969 	if (rv != 0)
   16970 		return rv;
   16971 
   16972 	rv = sc->phy.acquire(sc);
   16973 	if (rv != 0)
   16974 		return rv;
   16975 	/* Set MSE higher to enable link to stay up when noise is high */
   16976 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16977 	if (rv != 0)
   16978 		goto release;
   16979 	/* Drop link after 5 times MSE threshold was reached */
   16980 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16981 release:
   16982 	sc->phy.release(sc);
   16983 
   16984 	return rv;
   16985 }
   16986 
   16987 /**
   16988  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16989  *  @link: link up bool flag
   16990  *
    16991  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
    16992  *  indications, preventing further DMA write requests.  Work around the
    16993  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   16994  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16995  *  speeds in order to avoid Tx hangs.
   16996  **/
   16997 static int
   16998 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16999 {
   17000 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17001 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17002 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17003 	uint16_t phyreg;
   17004 
   17005 	if (link && (speed == STATUS_SPEED_1000)) {
   17006 		int rv;
   17007 
   17008 		rv = sc->phy.acquire(sc);
   17009 		if (rv != 0)
   17010 			return rv;
   17011 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17012 		    &phyreg);
   17013 		if (rv != 0)
   17014 			goto release;
   17015 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17016 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17017 		if (rv != 0)
   17018 			goto release;
   17019 		delay(20);
   17020 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17021 
          		/* Restore the original K1 configuration */
    17022 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    17023 		    phyreg);
   17024 release:
   17025 		sc->phy.release(sc);
   17026 		return rv;
   17027 	}
   17028 
   17029 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17030 
   17031 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17032 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17033 	    || !link
   17034 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17035 		goto update_fextnvm6;
   17036 
   17037 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17038 
   17039 	/* Clear link status transmit timeout */
   17040 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17041 	if (speed == STATUS_SPEED_100) {
   17042 		/* Set inband Tx timeout to 5x10us for 100Half */
   17043 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17044 
   17045 		/* Do not extend the K1 entry latency for 100Half */
   17046 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17047 	} else {
   17048 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17049 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17050 
   17051 		/* Extend the K1 entry latency for 10 Mbps */
   17052 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17053 	}
   17054 
   17055 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17056 
   17057 update_fextnvm6:
   17058 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17059 	return 0;
   17060 }
   17061 
   17062 /*
   17063  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17064  *  @sc:   pointer to the HW structure
   17065  *  @link: link up bool flag
   17066  *
   17067  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17068  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17069  *  If link is down, the function will restore the default K1 setting located
   17070  *  in the NVM.
   17071  */
   17072 static int
   17073 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17074 {
   17075 	int k1_enable = sc->sc_nvm_k1_enabled;
   17076 	int rv;
   17077 
   17078 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17079 		device_xname(sc->sc_dev), __func__));
   17080 
   17081 	rv = sc->phy.acquire(sc);
   17082 	if (rv != 0)
   17083 		return rv;
   17084 
   17085 	if (link) {
   17086 		k1_enable = 0;
   17087 
   17088 		/* Link stall fix for link up */
   17089 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17090 		    0x0100);
   17091 	} else {
   17092 		/* Link stall fix for link down */
   17093 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17094 		    0x4100);
   17095 	}
   17096 
   17097 	wm_configure_k1_ich8lan(sc, k1_enable);
   17098 	sc->phy.release(sc);
   17099 
   17100 	return 0;
   17101 }
   17102 
   17103 /*
   17104  *  wm_k1_workaround_lv - K1 Si workaround
   17105  *  @sc:   pointer to the HW structure
   17106  *
   17107  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17108  *  Disable K1 for 1000 and 100 speeds
   17109  */
   17110 static int
   17111 wm_k1_workaround_lv(struct wm_softc *sc)
   17112 {
   17113 	uint32_t reg;
   17114 	uint16_t phyreg;
   17115 	int rv;
   17116 
   17117 	if (sc->sc_type != WM_T_PCH2)
   17118 		return 0;
   17119 
   17120 	/* Set K1 beacon duration based on 10Mbps speed */
   17121 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17122 	if (rv != 0)
   17123 		return rv;
   17124 
   17125 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17126 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17127 		if (phyreg &
   17128 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17129 			/* LV 1G/100 packet drop issue workaround */
   17130 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17131 			    &phyreg);
   17132 			if (rv != 0)
   17133 				return rv;
   17134 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17135 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17136 			    phyreg);
   17137 			if (rv != 0)
   17138 				return rv;
   17139 		} else {
   17140 			/* For 10Mbps */
   17141 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17142 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17143 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17144 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17145 		}
   17146 	}
   17147 
   17148 	return 0;
   17149 }
   17150 
   17151 /*
   17152  *  wm_link_stall_workaround_hv - Si workaround
   17153  *  @sc: pointer to the HW structure
   17154  *
   17155  *  This function works around a Si bug where the link partner can get
   17156  *  a link up indication before the PHY does. If small packets are sent
   17157  *  by the link partner they can be placed in the packet buffer without
   17158  *  being properly accounted for by the PHY and will stall preventing
    17159  *  being properly accounted for by the PHY and will stall, preventing
   17160  *  packet buffer after the PHY detects link up.
   17161  */
   17162 static int
   17163 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17164 {
   17165 	uint16_t phyreg;
   17166 
   17167 	if (sc->sc_phytype != WMPHY_82578)
   17168 		return 0;
   17169 
    17170 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17171 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17172 	if ((phyreg & BMCR_LOOP) != 0)
   17173 		return 0;
   17174 
   17175 	/* Check if link is up and at 1Gbps */
   17176 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17177 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17178 	    | BM_CS_STATUS_SPEED_MASK;
   17179 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17180 		| BM_CS_STATUS_SPEED_1000))
   17181 		return 0;
   17182 
   17183 	delay(200 * 1000);	/* XXX too big */
   17184 
   17185 	/* Flush the packets in the fifo buffer */
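          	/*
          	 * Writing the mux control with FORCE_SPEED set and then
          	 * clearing it drains the PHY's packet buffer toward the MAC
          	 * (see the function comment above).
          	 */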
   17186 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17187 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17188 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17189 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17190 
   17191 	return 0;
   17192 }
   17193 
   17194 static int
   17195 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17196 {
   17197 	int rv;
   17198 
   17199 	rv = sc->phy.acquire(sc);
   17200 	if (rv != 0) {
   17201 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17202 		    __func__);
   17203 		return rv;
   17204 	}
   17205 
   17206 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17207 
   17208 	sc->phy.release(sc);
   17209 
   17210 	return rv;
   17211 }
   17212 
   17213 static int
   17214 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17215 {
   17216 	int rv;
   17217 	uint16_t reg;
   17218 
   17219 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17220 	if (rv != 0)
   17221 		return rv;
   17222 
   17223 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17224 	    reg | HV_KMRN_MDIO_SLOW);
   17225 }
   17226 
   17227 /*
   17228  *  wm_configure_k1_ich8lan - Configure K1 power state
   17229  *  @sc: pointer to the HW structure
   17230  *  @enable: K1 state to configure
   17231  *
   17232  *  Configure the K1 power state based on the provided parameter.
   17233  *  Assumes semaphore already acquired.
   17234  */
   17235 static void
   17236 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17237 {
   17238 	uint32_t ctrl, ctrl_ext, tmp;
   17239 	uint16_t kmreg;
   17240 	int rv;
   17241 
   17242 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17243 
   17244 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17245 	if (rv != 0)
   17246 		return;
   17247 
   17248 	if (k1_enable)
   17249 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17250 	else
   17251 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17252 
   17253 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17254 	if (rv != 0)
   17255 		return;
   17256 
   17257 	delay(20);
   17258 
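          	/*
          	 * Briefly force the MAC speed with speed-bypass enabled so
          	 * the forced value takes effect immediately (presumably to
          	 * latch the new K1 setting), then restore the original
          	 * CTRL/CTRL_EXT values.
          	 */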
   17259 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17260 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17261 
   17262 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17263 	tmp |= CTRL_FRCSPD;
   17264 
   17265 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17266 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17267 	CSR_WRITE_FLUSH(sc);
   17268 	delay(20);
   17269 
   17270 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17271 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17272 	CSR_WRITE_FLUSH(sc);
   17273 	delay(20);
   17274 
   17275 	return;
   17276 }
   17277 
   17278 /* special case - for 82575 - need to do manual init ... */
   17279 static void
   17280 wm_reset_init_script_82575(struct wm_softc *sc)
   17281 {
   17282 	/*
    17283 	 * Remark: this is untested code - we have no board without EEPROM.
    17284 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17285 	 */
   17286 
   17287 	/* SerDes configuration via SERDESCTRL */
   17288 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17289 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17290 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17291 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17292 
   17293 	/* CCM configuration via CCMCTL register */
   17294 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17295 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17296 
   17297 	/* PCIe lanes configuration */
   17298 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17299 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17300 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17301 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17302 
   17303 	/* PCIe PLL Configuration */
   17304 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17305 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17306 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17307 }
   17308 
   17309 static void
   17310 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17311 {
   17312 	uint32_t reg;
   17313 	uint16_t nvmword;
   17314 	int rv;
   17315 
   17316 	if (sc->sc_type != WM_T_82580)
   17317 		return;
   17318 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17319 		return;
   17320 
   17321 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17322 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17323 	if (rv != 0) {
   17324 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17325 		    __func__);
   17326 		return;
   17327 	}
   17328 
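          	/*
          	 * CFG3_PORTA in the NVM records whether this port's PHY is
          	 * reached over the external MDIO interface (EXT_MDIO) and
          	 * whether that interface is shared between ports (COM_MDIO);
          	 * mirror those bits into MDICNFG.
          	 */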
   17329 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17330 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17331 		reg |= MDICNFG_DEST;
   17332 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17333 		reg |= MDICNFG_COM_MDIO;
   17334 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17335 }
   17336 
   17337 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17338 
   17339 static bool
   17340 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17341 {
   17342 	uint32_t reg;
   17343 	uint16_t id1, id2;
   17344 	int i, rv;
   17345 
   17346 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17347 		device_xname(sc->sc_dev), __func__));
   17348 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17349 
   17350 	id1 = id2 = 0xffff;
   17351 	for (i = 0; i < 2; i++) {
   17352 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17353 		    &id1);
   17354 		if ((rv != 0) || MII_INVALIDID(id1))
   17355 			continue;
   17356 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17357 		    &id2);
   17358 		if ((rv != 0) || MII_INVALIDID(id2))
   17359 			continue;
   17360 		break;
   17361 	}
   17362 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17363 		goto out;
   17364 
   17365 	/*
   17366 	 * In case the PHY needs to be in mdio slow mode,
   17367 	 * set slow mode and try to get the PHY id again.
   17368 	 */
   17369 	rv = 0;
   17370 	if (sc->sc_type < WM_T_PCH_LPT) {
   17371 		wm_set_mdio_slow_mode_hv_locked(sc);
   17372 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17373 		    &id1);
   17374 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17375 		    &id2);
   17376 	}
   17377 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17378 		device_printf(sc->sc_dev, "XXX return with false\n");
   17379 		return false;
   17380 	}
   17381 out:
   17382 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17383 		/* Only unforce SMBus if ME is not active */
   17384 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17385 			uint16_t phyreg;
   17386 
   17387 			/* Unforce SMBus mode in PHY */
   17388 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17389 			    CV_SMB_CTRL, &phyreg);
   17390 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17391 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17392 			    CV_SMB_CTRL, phyreg);
   17393 
   17394 			/* Unforce SMBus mode in MAC */
   17395 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17396 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17397 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17398 		}
   17399 	}
   17400 	return true;
   17401 }
   17402 
   17403 static void
   17404 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17405 {
   17406 	uint32_t reg;
   17407 	int i;
   17408 
   17409 	/* Set PHY Config Counter to 50msec */
   17410 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17411 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17412 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17413 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17414 
   17415 	/* Toggle LANPHYPC */
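          	/*
          	 * Assert the override with the pin value forced low, then
          	 * release the override so the pin power cycles the PHY.
          	 * LPT and newer parts signal completion via CTRL_EXT_LPCD,
          	 * polled below.
          	 */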
   17416 	reg = CSR_READ(sc, WMREG_CTRL);
   17417 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17418 	reg &= ~CTRL_LANPHYPC_VALUE;
   17419 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17420 	CSR_WRITE_FLUSH(sc);
   17421 	delay(1000);
   17422 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17423 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17424 	CSR_WRITE_FLUSH(sc);
   17425 
   17426 	if (sc->sc_type < WM_T_PCH_LPT)
   17427 		delay(50 * 1000);
   17428 	else {
   17429 		i = 20;
   17430 
   17431 		do {
   17432 			delay(5 * 1000);
   17433 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17434 		    && i--);
   17435 
   17436 		delay(30 * 1000);
   17437 	}
   17438 }
   17439 
   17440 static int
   17441 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17442 {
   17443 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17444 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17445 	uint32_t rxa;
   17446 	uint16_t scale = 0, lat_enc = 0;
   17447 	int32_t obff_hwm = 0;
   17448 	int64_t lat_ns, value;
   17449 
   17450 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17451 		device_xname(sc->sc_dev), __func__));
   17452 
   17453 	if (link) {
   17454 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17455 		uint32_t status;
   17456 		uint16_t speed;
   17457 		pcireg_t preg;
   17458 
   17459 		status = CSR_READ(sc, WMREG_STATUS);
   17460 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17461 		case STATUS_SPEED_10:
   17462 			speed = 10;
   17463 			break;
   17464 		case STATUS_SPEED_100:
   17465 			speed = 100;
   17466 			break;
   17467 		case STATUS_SPEED_1000:
   17468 			speed = 1000;
   17469 			break;
   17470 		default:
   17471 			device_printf(sc->sc_dev, "Unknown speed "
   17472 			    "(status = %08x)\n", status);
   17473 			return -1;
   17474 		}
   17475 
   17476 		/* Rx Packet Buffer Allocation size (KB) */
   17477 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17478 
   17479 		/*
   17480 		 * Determine the maximum latency tolerated by the device.
   17481 		 *
   17482 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17483 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17484 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17485 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17486 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17487 		 */
   17488 		lat_ns = ((int64_t)rxa * 1024 -
   17489 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17490 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17491 		if (lat_ns < 0)
   17492 			lat_ns = 0;
   17493 		else
   17494 			lat_ns /= speed;
   17495 		value = lat_ns;
   17496 
   17497 		while (value > LTRV_VALUE) {
    17498 			scale++;
   17499 			value = howmany(value, __BIT(5));
   17500 		}
   17501 		if (scale > LTRV_SCALE_MAX) {
   17502 			device_printf(sc->sc_dev,
   17503 			    "Invalid LTR latency scale %d\n", scale);
   17504 			return -1;
   17505 		}
   17506 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
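          		/*
          		 * Worked example with hypothetical numbers: rxa = 24
          		 * (KB), MTU = 1500 and a 1000Mbps link give
          		 *   lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
          		 *          = 172384.
          		 * The loop above then settles on scale = 2 with
          		 * value = howmany(howmany(172384, 32), 32) = 169, so
          		 * lat_enc represents 169 * 2^10 = 173056 ns.
          		 */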
   17507 
   17508 		/* Determine the maximum latency tolerated by the platform */
   17509 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17510 		    WM_PCI_LTR_CAP_LPT);
   17511 		max_snoop = preg & 0xffff;
   17512 		max_nosnoop = preg >> 16;
   17513 
   17514 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17515 
   17516 		if (lat_enc > max_ltr_enc) {
   17517 			lat_enc = max_ltr_enc;
   17518 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17519 			    * PCI_LTR_SCALETONS(
   17520 				    __SHIFTOUT(lat_enc,
   17521 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17522 		}
   17523 
   17524 		if (lat_ns) {
   17525 			lat_ns *= speed * 1000;
   17526 			lat_ns /= 8;
   17527 			lat_ns /= 1000000000;
   17528 			obff_hwm = (int32_t)(rxa - lat_ns);
   17529 		}
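          		/*
          		 * The block above converts the tolerated latency back
          		 * into roughly the KB of Rx buffer that fill during
          		 * that time at the current speed; the OBFF high water
          		 * mark is the buffer space left over.
          		 */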
   17530 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    17531 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   17532 			    "(rxa = %d, lat_ns = %d)\n",
   17533 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17534 			return -1;
   17535 		}
   17536 	}
   17537 	/* Snoop and No-Snoop latencies the same */
   17538 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17539 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17540 
   17541 	/* Set OBFF high water mark */
   17542 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17543 	reg |= obff_hwm;
   17544 	CSR_WRITE(sc, WMREG_SVT, reg);
   17545 
   17546 	/* Enable OBFF */
   17547 	reg = CSR_READ(sc, WMREG_SVCR);
   17548 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17549 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17550 
   17551 	return 0;
   17552 }
   17553 
   17554 /*
   17555  * I210 Errata 25 and I211 Errata 10
   17556  * Slow System Clock.
   17557  *
   17558  * Note that this function is called on both FLASH and iNVM case on NetBSD.
   17559  */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same in both the I210's FLASH and the I21[01]'s
		 * iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

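	/*
	 * Poll the PHY's PLL frequency register; while the "unconfigured"
	 * bits are still set the PLL has not locked, and the reset
	 * sequence below is retried.
	 */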
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

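		/*
		 * Bounce the device through D3hot and back to D0,
		 * presumably so the autoload word patched above is
		 * re-read and the PLL relocks.
		 */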
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

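/*
 * Quirk for legacy (INTx) interrupts on PCH_SPT and PCH_CNP: keep the
 * side clock ungated and IOSF sideband clock gating disabled (assumed
 * rationale: so interrupts are delivered reliably; the bit names below
 * follow the register definitions).
 */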
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
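/*
 * Report the live Tx descriptor head (TDH) of one queue: the register
 * is re-read from the hardware on every sysctl query.
 */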
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

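/*
 * Likewise for the Tx descriptor tail (TDT); comparing TDH and TDT
 * shows how many descriptors the hardware still has to process.
 */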
static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
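/*
 * Read or update the runtime debug flag mask (sc_debug); on a write,
 * also dump TARC0 and TDT0 so the new setting can be correlated with
 * the current Tx state.
 */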
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif