      1 /*	$NetBSD: if_wm.c,v 1.768 2023/05/11 06:54:23 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.768 2023/05/11 06:54:23 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_if_wm.h"
     89 #endif
     90 
     91 #include <sys/param.h>
     92 
     93 #include <sys/atomic.h>
     94 #include <sys/callout.h>
     95 #include <sys/cpu.h>
     96 #include <sys/device.h>
     97 #include <sys/errno.h>
     98 #include <sys/interrupt.h>
     99 #include <sys/ioctl.h>
    100 #include <sys/kernel.h>
    101 #include <sys/kmem.h>
    102 #include <sys/mbuf.h>
    103 #include <sys/pcq.h>
    104 #include <sys/queue.h>
    105 #include <sys/rndsource.h>
    106 #include <sys/socket.h>
    107 #include <sys/sysctl.h>
    108 #include <sys/syslog.h>
    109 #include <sys/systm.h>
    110 #include <sys/workqueue.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/mdio.h>
    133 #include <dev/mii/miivar.h>
    134 #include <dev/mii/miidevs.h>
    135 #include <dev/mii/mii_bitbang.h>
    136 #include <dev/mii/ikphyreg.h>
    137 #include <dev/mii/igphyreg.h>
    138 #include <dev/mii/igphyvar.h>
    139 #include <dev/mii/inbmphyreg.h>
    140 #include <dev/mii/ihphyreg.h>
    141 #include <dev/mii/makphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 
    160 #if 0
    161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    163 	WM_DEBUG_LOCK
    164 #endif
    165 
    166 #define	DPRINTF(sc, x, y)			  \
    167 	do {					  \
    168 		if ((sc)->sc_debug & (x))	  \
    169 			printf y;		  \
    170 	} while (0)
    171 #else
    172 #define	DPRINTF(sc, x, y)	__nothing
    173 #endif /* WM_DEBUG */
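
/*
 * Illustrative use of DPRINTF (a sketch, not a verbatim excerpt from
 * the code below): the 'y' argument carries its own parentheses so a
 * variable argument list can pass through the simple macro above, e.g.
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 *
 * which prints only when WM_DEBUG_LINK is set in sc->sc_debug, and
 * compiles to nothing when WM_DEBUG is not defined.
 */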
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
    177 /*
     178  * Max number of interrupts this driver uses (one per queue + link).
    179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
    205  * We allow up to 64 DMA segments per packet.  Pathological packet
    206  * chains containing many small mbufs have been observed in zero-copy
     207  * situations with jumbo frames. If an mbuf chain has more than 64 DMA segments,
    208  * m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
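
/*
 * Both ring sizes are powers of two, so the WM_NEXT*() macros above
 * advance an index with a mask instead of a modulo.  A minimal sketch
 * (ours, for illustration only):
 *
 *	int next = (cur + 1) & WM_NTXDESC_MASK(txq);
 *
 * With 4096 descriptors (WM_NTXDESC_82544), index 4095 wraps to 0.
 */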
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
    237  * Receive descriptor list size.  We have one Rx buffer for normal
     238  * sized packets.  A full-sized jumbo packet consumes 5 Rx buffers.
     239  * We allocate 256 receive descriptors, each with a 2k buffer
     240  * (MCLBYTES), which gives us room for 50 jumbo packets.
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
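
/*
 * Worked sizing example (our arithmetic): a 9018-byte jumbo frame
 * (ETHER_MAX_LEN_JUMBO) split into 2048-byte (MCLBYTES) buffers needs
 * howmany(9018, 2048) = 5 descriptors, so the 256-entry ring holds
 * roughly 256 / 5 = 51 in-flight jumbo packets, i.e. the "room for 50"
 * noted above.
 */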
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    305 #if !defined(WM_EVENT_COUNTERS)
    306 #define WM_EVENT_COUNTERS 1
    307 #endif
    308 #endif
    309 
    310 #ifdef WM_EVENT_COUNTERS
    311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
    312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    313 	struct evcnt qname##_ev_##evname
    314 
    315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    316 	do {								\
    317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    319 		    "%s%02d%s", #qname, (qnum), #evname);		\
    320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    321 		    (evtype), NULL, (xname),				\
    322 		    (q)->qname##_##evname##_evcnt_name);		\
    323 	} while (0)
    324 
    325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    327 
    328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    330 
    331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    332 	evcnt_detach(&(q)->qname##_ev_##evname)
    333 #endif /* WM_EVENT_COUNTERS */
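
/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Note that '##' is not pasted inside a string literal, so every name
 * buffer is a fixed 18 bytes, large enough for names such as
 * "txq01txdw" (queue 1) that WM_Q_EVCNT_ATTACH() snprintf()s in before
 * calling evcnt_attach_dynamic(9).
 */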
    334 
    335 struct wm_txqueue {
    336 	kmutex_t *txq_lock;		/* lock for tx operations */
    337 
    338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    339 
    340 	/* Software state for the transmit descriptors. */
    341 	int txq_num;			/* must be a power of two */
    342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    343 
    344 	/* TX control data structures. */
    345 	int txq_ndesc;			/* must be a power of two */
    346 	size_t txq_descsize;		/* a tx descriptor size */
    347 	txdescs_t *txq_descs_u;
    348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     350 	int txq_desc_rseg;		/* real number of control segments */
    351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    352 #define	txq_descs	txq_descs_u->sctxu_txdescs
    353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    354 
    355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    356 
    357 	int txq_free;			/* number of free Tx descriptors */
    358 	int txq_next;			/* next ready Tx descriptor */
    359 
    360 	int txq_sfree;			/* number of free Tx jobs */
    361 	int txq_snext;			/* next free Tx job */
    362 	int txq_sdirty;			/* dirty Tx jobs */
    363 
    364 	/* These 4 variables are used only on the 82547. */
    365 	int txq_fifo_size;		/* Tx FIFO size */
    366 	int txq_fifo_head;		/* current head of FIFO */
    367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    369 
    370 	/*
     371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     372 	 * CPUs. This queue mediates between them without blocking.
    373 	 */
    374 	pcq_t *txq_interq;
    375 
    376 	/*
     377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     378 	 * to manage the Tx H/W queue's busy flag.
    379 	 */
    380 	int txq_flags;			/* flags for H/W queue, see below */
    381 #define	WM_TXQ_NO_SPACE		0x1
    382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    383 
    384 	bool txq_stopping;
    385 
    386 	bool txq_sending;
    387 	time_t txq_lastsent;
    388 
    389 	/* Checksum flags used for previous packet */
    390 	uint32_t	txq_last_hw_cmd;
    391 	uint8_t		txq_last_hw_fields;
    392 	uint16_t	txq_last_hw_ipcs;
    393 	uint16_t	txq_last_hw_tucs;
    394 
    395 	uint32_t txq_packets;		/* for AIM */
    396 	uint32_t txq_bytes;		/* for AIM */
    397 #ifdef WM_EVENT_COUNTERS
    398 	/* TX event counters */
    399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
    400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
    401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
    402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
    403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
    404 					    /* XXX not used? */
    405 
    406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
    407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
    408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
    409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
    410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
    411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
    412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
    413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
    414 					    /* other than toomanyseg */
    415 
    416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
    417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
    418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
    419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
    420 
    421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    423 #endif /* WM_EVENT_COUNTERS */
    424 };
    425 
    426 struct wm_rxqueue {
    427 	kmutex_t *rxq_lock;		/* lock for rx operations */
    428 
    429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    430 
    431 	/* Software state for the receive descriptors. */
    432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    433 
    434 	/* RX control data structures. */
    435 	int rxq_ndesc;			/* must be a power of two */
    436 	size_t rxq_descsize;		/* a rx descriptor size */
    437 	rxdescs_t *rxq_descs_u;
    438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     440 	int rxq_desc_rseg;		/* real number of control segments */
    441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    445 
    446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    447 
    448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    449 	int rxq_discard;
    450 	int rxq_len;
    451 	struct mbuf *rxq_head;
    452 	struct mbuf *rxq_tail;
    453 	struct mbuf **rxq_tailp;
    454 
    455 	bool rxq_stopping;
    456 
    457 	uint32_t rxq_packets;		/* for AIM */
    458 	uint32_t rxq_bytes;		/* for AIM */
    459 #ifdef WM_EVENT_COUNTERS
    460 	/* RX event counters */
    461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    463 
    464 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    465 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    466 #endif
    467 };
    468 
    469 struct wm_queue {
    470 	int wmq_id;			/* index of TX/RX queues */
    471 	int wmq_intr_idx;		/* index of MSI-X tables */
    472 
    473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    474 	bool wmq_set_itr;
    475 
    476 	struct wm_txqueue wmq_txq;
    477 	struct wm_rxqueue wmq_rxq;
    478 	char sysctlname[32];		/* Name for sysctl */
    479 
    480 	bool wmq_txrx_use_workqueue;
    481 	bool wmq_wq_enqueued;
    482 	struct work wmq_cookie;
    483 	void *wmq_si;
    484 };
    485 
    486 struct wm_phyop {
    487 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    488 	void (*release)(struct wm_softc *);
    489 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    490 	int (*writereg_locked)(device_t, int, int, uint16_t);
    491 	int reset_delay_us;
    492 	bool no_errprint;
    493 };
    494 
    495 struct wm_nvmop {
    496 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    497 	void (*release)(struct wm_softc *);
    498 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    499 };
    500 
    501 /*
    502  * Software state per device.
    503  */
    504 struct wm_softc {
    505 	device_t sc_dev;		/* generic device information */
    506 	bus_space_tag_t sc_st;		/* bus space tag */
    507 	bus_space_handle_t sc_sh;	/* bus space handle */
    508 	bus_size_t sc_ss;		/* bus space size */
    509 	bus_space_tag_t sc_iot;		/* I/O space tag */
    510 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    511 	bus_size_t sc_ios;		/* I/O space size */
    512 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    513 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    514 	bus_size_t sc_flashs;		/* flash registers space size */
    515 	off_t sc_flashreg_offset;	/*
    516 					 * offset to flash registers from
    517 					 * start of BAR
    518 					 */
    519 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    520 
    521 	struct ethercom sc_ethercom;	/* Ethernet common data */
    522 	struct mii_data sc_mii;		/* MII/media information */
    523 
    524 	pci_chipset_tag_t sc_pc;
    525 	pcitag_t sc_pcitag;
    526 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    527 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    528 
    529 	uint16_t sc_pcidevid;		/* PCI device ID */
    530 	wm_chip_type sc_type;		/* MAC type */
    531 	int sc_rev;			/* MAC revision */
    532 	wm_phy_type sc_phytype;		/* PHY type */
    533 	uint8_t sc_sfptype;		/* SFP type */
    534 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    535 #define	WM_MEDIATYPE_UNKNOWN		0x00
    536 #define	WM_MEDIATYPE_FIBER		0x01
    537 #define	WM_MEDIATYPE_COPPER		0x02
    538 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    539 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    540 	int sc_flags;			/* flags; see below */
    541 	u_short sc_if_flags;		/* last if_flags */
    542 	int sc_ec_capenable;		/* last ec_capenable */
    543 	int sc_flowflags;		/* 802.3x flow control flags */
    544 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    545 	int sc_align_tweak;
    546 
    547 	void *sc_ihs[WM_MAX_NINTR];	/*
    548 					 * interrupt cookie.
    549 					 * - legacy and msi use sc_ihs[0] only
    550 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    551 					 */
    552 	pci_intr_handle_t *sc_intrs;	/*
     553 					 * - legacy and msi use sc_intrs[0] only
     554 					 * - msix use sc_intrs[0] to sc_intrs[nintrs-1]
    555 					 */
    556 	int sc_nintrs;			/* number of interrupts */
    557 
    558 	int sc_link_intr_idx;		/* index of MSI-X tables */
    559 
    560 	callout_t sc_tick_ch;		/* tick callout */
    561 	bool sc_core_stopping;
    562 
    563 	int sc_nvm_ver_major;
    564 	int sc_nvm_ver_minor;
    565 	int sc_nvm_ver_build;
    566 	int sc_nvm_addrbits;		/* NVM address bits */
    567 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    568 	int sc_ich8_flash_base;
    569 	int sc_ich8_flash_bank_size;
    570 	int sc_nvm_k1_enabled;
    571 
    572 	int sc_nqueues;
    573 	struct wm_queue *sc_queue;
    574 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    575 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    576 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    577 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    578 	struct workqueue *sc_queue_wq;
    579 	bool sc_txrx_use_workqueue;
    580 
    581 	int sc_affinity_offset;
    582 
    583 #ifdef WM_EVENT_COUNTERS
    584 	/* Event counters. */
    585 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    586 
    587 	/* >= WM_T_82542_2_1 */
    588 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    589 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    590 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    591 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    592 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    593 
    594 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
    595 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
    596 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
    597 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
    598 	struct evcnt sc_ev_mpc;		/* Missed Packets */
    599 	struct evcnt sc_ev_colc;	/* Collision */
    600 	struct evcnt sc_ev_sec;		/* Sequence Error */
    601 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
    602 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
    603 	struct evcnt sc_ev_scc;		/* Single Collision */
    604 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
    605 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
    606 	struct evcnt sc_ev_latecol;	/* Late Collision */
    607 	struct evcnt sc_ev_dc;		/* Defer */
    608 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
    609 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
    610 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
    611 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
    612 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
    613 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
    614 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
    615 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
    616 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
    617 	struct evcnt sc_ev_roc;		/* Rx Oversize */
    618 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
    619 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
    620 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
    621 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
    622 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
    623 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
    624 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
    625 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
    626 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
    627 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
    628 	struct evcnt sc_ev_prc511;	/* Packets Rx (255-511 bytes) */
    629 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
    630 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
    631 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
    632 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
    633 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
    634 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
    635 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
    636 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
    637 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
    638 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
    639 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
    640 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
    641 	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
    642 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
    643 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
    644 	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
    645 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
    646 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
    647 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
    648 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
    649 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
    650 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
    651 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
    652 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
    653 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
    654 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
    655 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
    656 
    657 #endif /* WM_EVENT_COUNTERS */
    658 
    659 	struct sysctllog *sc_sysctllog;
    660 
     661 	/* This variable is used only on the 82547. */
    662 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    663 
    664 	uint32_t sc_ctrl;		/* prototype CTRL register */
    665 #if 0
    666 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    667 #endif
    668 	uint32_t sc_icr;		/* prototype interrupt bits */
    669 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    670 	uint32_t sc_tctl;		/* prototype TCTL register */
    671 	uint32_t sc_rctl;		/* prototype RCTL register */
    672 	uint32_t sc_txcw;		/* prototype TXCW register */
    673 	uint32_t sc_tipg;		/* prototype TIPG register */
    674 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    675 	uint32_t sc_pba;		/* prototype PBA register */
    676 
    677 	int sc_tbi_linkup;		/* TBI link status */
    678 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    679 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    680 
    681 	int sc_mchash_type;		/* multicast filter offset */
    682 
    683 	krndsource_t rnd_source;	/* random source */
    684 
    685 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    686 
    687 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    688 	kmutex_t *sc_ich_phymtx;	/*
    689 					 * 82574/82583/ICH/PCH specific PHY
    690 					 * mutex. For 82574/82583, the mutex
    691 					 * is used for both PHY and NVM.
    692 					 */
    693 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    694 
    695 	struct wm_phyop phy;
    696 	struct wm_nvmop nvm;
    697 
    698 	struct workqueue *sc_reset_wq;
    699 	struct work sc_reset_work;
    700 	volatile unsigned sc_reset_pending;
    701 
    702 	bool sc_dying;
    703 
    704 #ifdef WM_DEBUG
    705 	uint32_t sc_debug;
    706 	bool sc_trigger_reset;
    707 #endif
    708 };
    709 
    710 #define	WM_RXCHAIN_RESET(rxq)						\
    711 do {									\
    712 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    713 	*(rxq)->rxq_tailp = NULL;					\
    714 	(rxq)->rxq_len = 0;						\
    715 } while (/*CONSTCOND*/0)
    716 
    717 #define	WM_RXCHAIN_LINK(rxq, m)						\
    718 do {									\
    719 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    720 	(rxq)->rxq_tailp = &(m)->m_next;				\
    721 } while (/*CONSTCOND*/0)
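
/*
 * Usage sketch (ours, not a verbatim excerpt): rxq_tailp always points
 * at the next m_next slot to fill, so appends are O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);		head == NULL, tailp == &head
 *	WM_RXCHAIN_LINK(rxq, m1);	head == m1,   tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, tail == m2
 */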
    722 
    723 #ifdef WM_EVENT_COUNTERS
    724 #ifdef __HAVE_ATOMIC64_LOADSTORE
    725 #define	WM_EVCNT_INCR(ev)						\
    726 	atomic_store_relaxed(&((ev)->ev_count),				\
    727 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    728 #define	WM_EVCNT_ADD(ev, val)						\
    729 	atomic_store_relaxed(&((ev)->ev_count),				\
    730 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    731 #else
    732 #define	WM_EVCNT_INCR(ev)						\
    733 	((ev)->ev_count)++
    734 #define	WM_EVCNT_ADD(ev, val)						\
    735 	(ev)->ev_count += (val)
    736 #endif
    737 
    738 #define WM_Q_EVCNT_INCR(qname, evname)			\
    739 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    740 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    741 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    742 #else /* !WM_EVENT_COUNTERS */
    743 #define	WM_EVCNT_INCR(ev)	/* nothing */
    744 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    745 
    746 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    747 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    748 #endif /* !WM_EVENT_COUNTERS */
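
/*
 * Note on the atomic variants above (our reading of the macros): the
 * increment is a relaxed load/store pair, not an atomic read-modify-
 * write.  That is tear-free for concurrent 64-bit readers, but it
 * assumes a single writer per counter (each counter is only bumped
 * from its own queue's context).
 */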
    749 
    750 #define	CSR_READ(sc, reg)						\
    751 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    752 #define	CSR_WRITE(sc, reg, val)						\
    753 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    754 #define	CSR_WRITE_FLUSH(sc)						\
    755 	(void)CSR_READ((sc), WMREG_STATUS)
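
/*
 * Typical read-modify-write with a posted-write flush (a hedged sketch;
 * actual register usage varies by chip):
 *
 *	uint32_t reg = CSR_READ(sc, WMREG_CTRL);
 *	reg |= CTRL_SLU;
 *	CSR_WRITE(sc, WMREG_CTRL, reg);
 *	CSR_WRITE_FLUSH(sc);	(reads STATUS to push the write out)
 */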
    756 
    757 #define ICH8_FLASH_READ32(sc, reg)					\
    758 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    759 	    (reg) + sc->sc_flashreg_offset)
    760 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    761 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    762 	    (reg) + sc->sc_flashreg_offset, (data))
    763 
    764 #define ICH8_FLASH_READ16(sc, reg)					\
    765 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    766 	    (reg) + sc->sc_flashreg_offset)
    767 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    768 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    769 	    (reg) + sc->sc_flashreg_offset, (data))
    770 
    771 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    772 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    773 
    774 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    775 #define	WM_CDTXADDR_HI(txq, x)						\
    776 	(sizeof(bus_addr_t) == 8 ?					\
    777 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    778 
    779 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    780 #define	WM_CDRXADDR_HI(rxq, x)						\
    781 	(sizeof(bus_addr_t) == 8 ?					\
    782 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
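
/*
 * Example (a sketch; TDBAL/TDBAH and 'qid' are the conventional names
 * for this family, shown for illustration): the hardware takes each
 * 64-bit descriptor base address as two 32-bit halves, which the LO/HI
 * macros above provide:
 *
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
 *
 * On systems with a 32-bit bus_addr_t the HI half is simply 0.
 */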
    783 
    784 /*
     785  * Register read/write functions
     786  * other than CSR_{READ|WRITE}().
    787  */
    788 #if 0
    789 static inline uint32_t wm_io_read(struct wm_softc *, int);
    790 #endif
    791 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    792 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    793     uint32_t, uint32_t);
    794 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    795 
    796 /*
    797  * Descriptor sync/init functions.
    798  */
    799 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    800 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    801 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    802 
    803 /*
    804  * Device driver interface functions and commonly used functions.
    805  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    806  */
    807 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    808 static int	wm_match(device_t, cfdata_t, void *);
    809 static void	wm_attach(device_t, device_t, void *);
    810 static int	wm_detach(device_t, int);
    811 static bool	wm_suspend(device_t, const pmf_qual_t *);
    812 static bool	wm_resume(device_t, const pmf_qual_t *);
    813 static bool	wm_watchdog(struct ifnet *);
    814 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    815     uint16_t *);
    816 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    817     uint16_t *);
    818 static void	wm_tick(void *);
    819 static int	wm_ifflags_cb(struct ethercom *);
    820 static int	wm_ioctl(struct ifnet *, u_long, void *);
    821 /* MAC address related */
    822 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    823 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    824 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    825 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    826 static int	wm_rar_count(struct wm_softc *);
    827 static void	wm_set_filter(struct wm_softc *);
    828 /* Reset and init related */
    829 static void	wm_set_vlan(struct wm_softc *);
    830 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    831 static void	wm_get_auto_rd_done(struct wm_softc *);
    832 static void	wm_lan_init_done(struct wm_softc *);
    833 static void	wm_get_cfg_done(struct wm_softc *);
    834 static int	wm_phy_post_reset(struct wm_softc *);
    835 static int	wm_write_smbus_addr(struct wm_softc *);
    836 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    837 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    838 static void	wm_initialize_hardware_bits(struct wm_softc *);
    839 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    840 static int	wm_reset_phy(struct wm_softc *);
    841 static void	wm_flush_desc_rings(struct wm_softc *);
    842 static void	wm_reset(struct wm_softc *);
    843 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    844 static void	wm_rxdrain(struct wm_rxqueue *);
    845 static void	wm_init_rss(struct wm_softc *);
    846 static void	wm_adjust_qnum(struct wm_softc *, int);
    847 static inline bool	wm_is_using_msix(struct wm_softc *);
    848 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    849 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    850 static int	wm_setup_legacy(struct wm_softc *);
    851 static int	wm_setup_msix(struct wm_softc *);
    852 static int	wm_init(struct ifnet *);
    853 static int	wm_init_locked(struct ifnet *);
    854 static void	wm_init_sysctls(struct wm_softc *);
    855 static void	wm_unset_stopping_flags(struct wm_softc *);
    856 static void	wm_set_stopping_flags(struct wm_softc *);
    857 static void	wm_stop(struct ifnet *, int);
    858 static void	wm_stop_locked(struct ifnet *, bool, bool);
    859 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    860 static void	wm_82547_txfifo_stall(void *);
    861 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    862 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    863 /* DMA related */
    864 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    865 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    866 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    867 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    868     struct wm_txqueue *);
    869 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    870 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    871 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    872     struct wm_rxqueue *);
    873 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    874 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    875 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    876 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    877 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    878 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    879 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    880     struct wm_txqueue *);
    881 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    882     struct wm_rxqueue *);
    883 static int	wm_alloc_txrx_queues(struct wm_softc *);
    884 static void	wm_free_txrx_queues(struct wm_softc *);
    885 static int	wm_init_txrx_queues(struct wm_softc *);
    886 /* Start */
    887 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    888     struct wm_txsoft *, uint32_t *, uint8_t *);
    889 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    890 static void	wm_start(struct ifnet *);
    891 static void	wm_start_locked(struct ifnet *);
    892 static int	wm_transmit(struct ifnet *, struct mbuf *);
    893 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    894 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    895     bool);
    896 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    897     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    898 static void	wm_nq_start(struct ifnet *);
    899 static void	wm_nq_start_locked(struct ifnet *);
    900 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    901 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    902 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    903     bool);
    904 static void	wm_deferred_start_locked(struct wm_txqueue *);
    905 static void	wm_handle_queue(void *);
    906 static void	wm_handle_queue_work(struct work *, void *);
    907 static void	wm_handle_reset_work(struct work *, void *);
    908 /* Interrupt */
    909 static bool	wm_txeof(struct wm_txqueue *, u_int);
    910 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    911 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    912 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    913 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    914 static void	wm_linkintr(struct wm_softc *, uint32_t);
    915 static int	wm_intr_legacy(void *);
    916 static inline void	wm_txrxintr_disable(struct wm_queue *);
    917 static inline void	wm_txrxintr_enable(struct wm_queue *);
    918 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    919 static int	wm_txrxintr_msix(void *);
    920 static int	wm_linkintr_msix(void *);
    921 
    922 /*
    923  * Media related.
    924  * GMII, SGMII, TBI, SERDES and SFP.
    925  */
    926 /* Common */
    927 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    928 /* GMII related */
    929 static void	wm_gmii_reset(struct wm_softc *);
    930 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    931 static int	wm_get_phy_id_82575(struct wm_softc *);
    932 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    933 static int	wm_gmii_mediachange(struct ifnet *);
    934 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    935 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    936 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    937 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    938 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    939 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    940 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    941 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    942 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    943 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    944 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    945 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    946 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    947 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    948 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    949 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    950 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    951 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    952 	bool);
    953 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    954 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    955 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    956 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    957 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    958 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    959 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    960 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    961 static void	wm_gmii_statchg(struct ifnet *);
    962 /*
     963  * Kumeran related (80003, ICH* and PCH*).
     964  * These functions are not for accessing MII registers but for accessing
     965  * Kumeran-specific registers.
    966  */
    967 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    968 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    969 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    970 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    971 /* EMI register related */
    972 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    973 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    974 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    975 /* SGMII */
    976 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    977 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    978 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    979 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    980 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    981 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    982 /* TBI related */
    983 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    984 static void	wm_tbi_mediainit(struct wm_softc *);
    985 static int	wm_tbi_mediachange(struct ifnet *);
    986 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    987 static int	wm_check_for_link(struct wm_softc *);
    988 static void	wm_tbi_tick(struct wm_softc *);
    989 /* SERDES related */
    990 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    991 static int	wm_serdes_mediachange(struct ifnet *);
    992 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    993 static void	wm_serdes_tick(struct wm_softc *);
    994 /* SFP related */
    995 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    996 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    997 
    998 /*
    999  * NVM related.
   1000  * Microwire, SPI (w/wo EERD) and Flash.
   1001  */
   1002 /* Misc functions */
   1003 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
   1004 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
   1005 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
   1006 /* Microwire */
   1007 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
   1008 /* SPI */
   1009 static int	wm_nvm_ready_spi(struct wm_softc *);
   1010 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
   1011 /* Using with EERD */
   1012 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
   1013 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
   1014 /* Flash */
   1015 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
   1016     unsigned int *);
   1017 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
   1018 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
   1019 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
   1020     uint32_t *);
   1021 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
   1022 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
   1023 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
   1024 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
   1025 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
   1026 /* iNVM */
   1027 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
   1028 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
   1029 /* Lock, detecting NVM type, validate checksum and read */
   1030 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
   1031 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
   1032 static int	wm_nvm_validate_checksum(struct wm_softc *);
   1033 static void	wm_nvm_version_invm(struct wm_softc *);
   1034 static void	wm_nvm_version(struct wm_softc *);
   1035 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
   1036 
   1037 /*
   1038  * Hardware semaphores.
    1039  * Very complex...
   1040  */
   1041 static int	wm_get_null(struct wm_softc *);
   1042 static void	wm_put_null(struct wm_softc *);
   1043 static int	wm_get_eecd(struct wm_softc *);
   1044 static void	wm_put_eecd(struct wm_softc *);
   1045 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
   1046 static void	wm_put_swsm_semaphore(struct wm_softc *);
   1047 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
   1048 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
   1049 static int	wm_get_nvm_80003(struct wm_softc *);
   1050 static void	wm_put_nvm_80003(struct wm_softc *);
   1051 static int	wm_get_nvm_82571(struct wm_softc *);
   1052 static void	wm_put_nvm_82571(struct wm_softc *);
   1053 static int	wm_get_phy_82575(struct wm_softc *);
   1054 static void	wm_put_phy_82575(struct wm_softc *);
   1055 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1056 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1057 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1058 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1059 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1060 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1061 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1062 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1063 
   1064 /*
   1065  * Management mode and power management related subroutines.
   1066  * BMC, AMT, suspend/resume and EEE.
   1067  */
   1068 #if 0
   1069 static int	wm_check_mng_mode(struct wm_softc *);
   1070 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1071 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1072 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1073 #endif
   1074 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1075 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1076 static void	wm_get_hw_control(struct wm_softc *);
   1077 static void	wm_release_hw_control(struct wm_softc *);
   1078 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1079 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1080 static void	wm_init_manageability(struct wm_softc *);
   1081 static void	wm_release_manageability(struct wm_softc *);
   1082 static void	wm_get_wakeup(struct wm_softc *);
   1083 static int	wm_ulp_disable(struct wm_softc *);
   1084 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1085 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1086 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1087 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1088 static void	wm_enable_wakeup(struct wm_softc *);
   1089 static void	wm_disable_aspm(struct wm_softc *);
   1090 /* LPLU (Low Power Link Up) */
   1091 static void	wm_lplu_d0_disable(struct wm_softc *);
   1092 /* EEE */
   1093 static int	wm_set_eee_i350(struct wm_softc *);
   1094 static int	wm_set_eee_pchlan(struct wm_softc *);
   1095 static int	wm_set_eee(struct wm_softc *);
   1096 
   1097 /*
   1098  * Workarounds (mainly PHY related).
   1099  * Basically, PHY's workarounds are in the PHY drivers.
   1100  */
   1101 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1102 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1103 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1104 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1105 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1106 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1107 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1108 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1109 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1110 static int	wm_k1_workaround_lv(struct wm_softc *);
   1111 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1112 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1113 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
   1114 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1115 static void	wm_reset_init_script_82575(struct wm_softc *);
   1116 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1117 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1118 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1119 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1120 static int	wm_pll_workaround_i210(struct wm_softc *);
   1121 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1122 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1123 static void	wm_set_linkdown_discard(struct wm_softc *);
   1124 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1125 
   1126 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
   1127 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
   1128 #ifdef WM_DEBUG
   1129 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1130 #endif
   1131 
   1132 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1133     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1134 
   1135 /*
   1136  * Devices supported by this driver.
   1137  */
   1138 static const struct wm_product {
   1139 	pci_vendor_id_t		wmp_vendor;
   1140 	pci_product_id_t	wmp_product;
   1141 	const char		*wmp_name;
   1142 	wm_chip_type		wmp_type;
   1143 	uint32_t		wmp_flags;
   1144 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1145 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1146 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1147 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1148 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1149 } wm_products[] = {
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1151 	  "Intel i82542 1000BASE-X Ethernet",
   1152 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1155 	  "Intel i82543GC 1000BASE-X Ethernet",
   1156 	  WM_T_82543,		WMP_F_FIBER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1159 	  "Intel i82543GC 1000BASE-T Ethernet",
   1160 	  WM_T_82543,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1163 	  "Intel i82544EI 1000BASE-T Ethernet",
   1164 	  WM_T_82544,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1167 	  "Intel i82544EI 1000BASE-X Ethernet",
   1168 	  WM_T_82544,		WMP_F_FIBER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1171 	  "Intel i82544GC 1000BASE-T Ethernet",
   1172 	  WM_T_82544,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1175 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1176 	  WM_T_82544,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1179 	  "Intel i82540EM 1000BASE-T Ethernet",
   1180 	  WM_T_82540,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1183 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1184 	  WM_T_82540,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1187 	  "Intel i82540EP 1000BASE-T Ethernet",
   1188 	  WM_T_82540,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1191 	  "Intel i82540EP 1000BASE-T Ethernet",
   1192 	  WM_T_82540,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1195 	  "Intel i82540EP 1000BASE-T Ethernet",
   1196 	  WM_T_82540,		WMP_F_COPPER },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1199 	  "Intel i82545EM 1000BASE-T Ethernet",
   1200 	  WM_T_82545,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1203 	  "Intel i82545GM 1000BASE-T Ethernet",
   1204 	  WM_T_82545_3,		WMP_F_COPPER },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1207 	  "Intel i82545GM 1000BASE-X Ethernet",
   1208 	  WM_T_82545_3,		WMP_F_FIBER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1211 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1212 	  WM_T_82545_3,		WMP_F_SERDES },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1215 	  "Intel i82546EB 1000BASE-T Ethernet",
   1216 	  WM_T_82546,		WMP_F_COPPER },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1219 	  "Intel i82546EB 1000BASE-T Ethernet",
   1220 	  WM_T_82546,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1223 	  "Intel i82545EM 1000BASE-X Ethernet",
   1224 	  WM_T_82545,		WMP_F_FIBER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1227 	  "Intel i82546EB 1000BASE-X Ethernet",
   1228 	  WM_T_82546,		WMP_F_FIBER },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1231 	  "Intel i82546GB 1000BASE-T Ethernet",
   1232 	  WM_T_82546_3,		WMP_F_COPPER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1235 	  "Intel i82546GB 1000BASE-X Ethernet",
   1236 	  WM_T_82546_3,		WMP_F_FIBER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1239 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1240 	  WM_T_82546_3,		WMP_F_SERDES },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1243 	  "i82546GB quad-port Gigabit Ethernet",
   1244 	  WM_T_82546_3,		WMP_F_COPPER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1247 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1248 	  WM_T_82546_3,		WMP_F_COPPER },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1251 	  "Intel PRO/1000MT (82546GB)",
   1252 	  WM_T_82546_3,		WMP_F_COPPER },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1255 	  "Intel i82541EI 1000BASE-T Ethernet",
   1256 	  WM_T_82541,		WMP_F_COPPER },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1259 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1260 	  WM_T_82541,		WMP_F_COPPER },
   1261 
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1263 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1264 	  WM_T_82541,		WMP_F_COPPER },
   1265 
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1267 	  "Intel i82541ER 1000BASE-T Ethernet",
   1268 	  WM_T_82541_2,		WMP_F_COPPER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1271 	  "Intel i82541GI 1000BASE-T Ethernet",
   1272 	  WM_T_82541_2,		WMP_F_COPPER },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1275 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1276 	  WM_T_82541_2,		WMP_F_COPPER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1279 	  "Intel i82541PI 1000BASE-T Ethernet",
   1280 	  WM_T_82541_2,		WMP_F_COPPER },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1283 	  "Intel i82547EI 1000BASE-T Ethernet",
   1284 	  WM_T_82547,		WMP_F_COPPER },
   1285 
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1287 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1288 	  WM_T_82547,		WMP_F_COPPER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1291 	  "Intel i82547GI 1000BASE-T Ethernet",
   1292 	  WM_T_82547_2,		WMP_F_COPPER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1295 	  "Intel PRO/1000 PT (82571EB)",
   1296 	  WM_T_82571,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1299 	  "Intel PRO/1000 PF (82571EB)",
   1300 	  WM_T_82571,		WMP_F_FIBER },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1303 	  "Intel PRO/1000 PB (82571EB)",
   1304 	  WM_T_82571,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1307 	  "Intel PRO/1000 QT (82571EB)",
   1308 	  WM_T_82571,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1311 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1312 	  WM_T_82571,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1315 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1316 	  WM_T_82571,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1319 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1320 	  WM_T_82571,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1323 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1324 	  WM_T_82571,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1327 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1328 	  WM_T_82571,		WMP_F_FIBER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1331 	  "Intel i82572EI 1000baseT Ethernet",
   1332 	  WM_T_82572,		WMP_F_COPPER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1335 	  "Intel i82572EI 1000baseX Ethernet",
   1336 	  WM_T_82572,		WMP_F_FIBER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1339 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1340 	  WM_T_82572,		WMP_F_SERDES },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1343 	  "Intel i82572EI 1000baseT Ethernet",
   1344 	  WM_T_82572,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1347 	  "Intel i82573E",
   1348 	  WM_T_82573,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1351 	  "Intel i82573E IAMT",
   1352 	  WM_T_82573,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1355 	  "Intel i82573L Gigabit Ethernet",
   1356 	  WM_T_82573,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1359 	  "Intel i82574L",
   1360 	  WM_T_82574,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1363 	  "Intel i82574L",
   1364 	  WM_T_82574,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1367 	  "Intel i82583V",
   1368 	  WM_T_82583,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1371 	  "i80003 dual 1000baseT Ethernet",
   1372 	  WM_T_80003,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1375 	  "i80003 dual 1000baseX Ethernet",
   1376 	  WM_T_80003,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1379 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1380 	  WM_T_80003,		WMP_F_SERDES },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1383 	  "Intel i80003 1000baseT Ethernet",
   1384 	  WM_T_80003,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1387 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1388 	  WM_T_80003,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1391 	  "Intel i82801H (M_AMT) LAN Controller",
   1392 	  WM_T_ICH8,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1394 	  "Intel i82801H (AMT) LAN Controller",
   1395 	  WM_T_ICH8,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1397 	  "Intel i82801H LAN Controller",
   1398 	  WM_T_ICH8,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1400 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1401 	  WM_T_ICH8,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1403 	  "Intel i82801H (M) LAN Controller",
   1404 	  WM_T_ICH8,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1406 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1407 	  WM_T_ICH8,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1409 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1410 	  WM_T_ICH8,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1412 	  "82567V-3 LAN Controller",
   1413 	  WM_T_ICH8,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1415 	  "82801I (AMT) LAN Controller",
   1416 	  WM_T_ICH9,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1418 	  "82801I 10/100 LAN Controller",
   1419 	  WM_T_ICH9,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1421 	  "82801I (G) 10/100 LAN Controller",
   1422 	  WM_T_ICH9,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1424 	  "82801I (GT) 10/100 LAN Controller",
   1425 	  WM_T_ICH9,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1427 	  "82801I (C) LAN Controller",
   1428 	  WM_T_ICH9,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1430 	  "82801I mobile LAN Controller",
   1431 	  WM_T_ICH9,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1433 	  "82801I mobile (V) LAN Controller",
   1434 	  WM_T_ICH9,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1436 	  "82801I mobile (AMT) LAN Controller",
   1437 	  WM_T_ICH9,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1439 	  "82567LM-4 LAN Controller",
   1440 	  WM_T_ICH9,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1442 	  "82567LM-2 LAN Controller",
   1443 	  WM_T_ICH10,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1445 	  "82567LF-2 LAN Controller",
   1446 	  WM_T_ICH10,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1448 	  "82567LM-3 LAN Controller",
   1449 	  WM_T_ICH10,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1451 	  "82567LF-3 LAN Controller",
   1452 	  WM_T_ICH10,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1454 	  "82567V-2 LAN Controller",
   1455 	  WM_T_ICH10,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1457 	  "82567V-3? LAN Controller",
   1458 	  WM_T_ICH10,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1460 	  "HANKSVILLE LAN Controller",
   1461 	  WM_T_ICH10,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1463 	  "PCH LAN (82577LM) Controller",
   1464 	  WM_T_PCH,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1466 	  "PCH LAN (82577LC) Controller",
   1467 	  WM_T_PCH,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1469 	  "PCH LAN (82578DM) Controller",
   1470 	  WM_T_PCH,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1472 	  "PCH LAN (82578DC) Controller",
   1473 	  WM_T_PCH,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1475 	  "PCH2 LAN (82579LM) Controller",
   1476 	  WM_T_PCH2,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1478 	  "PCH2 LAN (82579V) Controller",
   1479 	  WM_T_PCH2,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1481 	  "82575EB dual-1000baseT Ethernet",
   1482 	  WM_T_82575,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1484 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1485 	  WM_T_82575,		WMP_F_SERDES },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1487 	  "82575GB quad-1000baseT Ethernet",
   1488 	  WM_T_82575,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1490 	  "82575GB quad-1000baseT Ethernet (PM)",
   1491 	  WM_T_82575,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1493 	  "82576 1000BaseT Ethernet",
   1494 	  WM_T_82576,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1496 	  "82576 1000BaseX Ethernet",
   1497 	  WM_T_82576,		WMP_F_FIBER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1500 	  "82576 gigabit Ethernet (SERDES)",
   1501 	  WM_T_82576,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1504 	  "82576 quad-1000BaseT Ethernet",
   1505 	  WM_T_82576,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1508 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1509 	  WM_T_82576,		WMP_F_COPPER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1512 	  "82576 gigabit Ethernet",
   1513 	  WM_T_82576,		WMP_F_COPPER },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1516 	  "82576 gigabit Ethernet (SERDES)",
   1517 	  WM_T_82576,		WMP_F_SERDES },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1519 	  "82576 quad-gigabit Ethernet (SERDES)",
   1520 	  WM_T_82576,		WMP_F_SERDES },
   1521 
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1523 	  "82580 1000BaseT Ethernet",
   1524 	  WM_T_82580,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1526 	  "82580 1000BaseX Ethernet",
   1527 	  WM_T_82580,		WMP_F_FIBER },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1530 	  "82580 1000BaseT Ethernet (SERDES)",
   1531 	  WM_T_82580,		WMP_F_SERDES },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1534 	  "82580 gigabit Ethernet (SGMII)",
   1535 	  WM_T_82580,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1537 	  "82580 dual-1000BaseT Ethernet",
   1538 	  WM_T_82580,		WMP_F_COPPER },
   1539 
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1541 	  "82580 quad-1000BaseX Ethernet",
   1542 	  WM_T_82580,		WMP_F_FIBER },
   1543 
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1545 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1546 	  WM_T_82580,		WMP_F_COPPER },
   1547 
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1549 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1550 	  WM_T_82580,		WMP_F_SERDES },
   1551 
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1553 	  "DH89XXCC 1000BASE-KX Ethernet",
   1554 	  WM_T_82580,		WMP_F_SERDES },
   1555 
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1557 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1558 	  WM_T_82580,		WMP_F_SERDES },
   1559 
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1561 	  "I350 Gigabit Network Connection",
   1562 	  WM_T_I350,		WMP_F_COPPER },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1565 	  "I350 Gigabit Fiber Network Connection",
   1566 	  WM_T_I350,		WMP_F_FIBER },
   1567 
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1569 	  "I350 Gigabit Backplane Connection",
   1570 	  WM_T_I350,		WMP_F_SERDES },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1573 	  "I350 Quad Port Gigabit Ethernet",
   1574 	  WM_T_I350,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1577 	  "I350 Gigabit Connection",
   1578 	  WM_T_I350,		WMP_F_COPPER },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1581 	  "I354 Gigabit Ethernet (KX)",
   1582 	  WM_T_I354,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1585 	  "I354 Gigabit Ethernet (SGMII)",
   1586 	  WM_T_I354,		WMP_F_COPPER },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1589 	  "I354 Gigabit Ethernet (2.5G)",
   1590 	  WM_T_I354,		WMP_F_COPPER },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1593 	  "I210-T1 Ethernet Server Adapter",
   1594 	  WM_T_I210,		WMP_F_COPPER },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1597 	  "I210 Ethernet (Copper OEM)",
   1598 	  WM_T_I210,		WMP_F_COPPER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1601 	  "I210 Ethernet (Copper IT)",
   1602 	  WM_T_I210,		WMP_F_COPPER },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1605 	  "I210 Ethernet (Copper, FLASH less)",
   1606 	  WM_T_I210,		WMP_F_COPPER },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1609 	  "I210 Gigabit Ethernet (Fiber)",
   1610 	  WM_T_I210,		WMP_F_FIBER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1613 	  "I210 Gigabit Ethernet (SERDES)",
   1614 	  WM_T_I210,		WMP_F_SERDES },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1617 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1618 	  WM_T_I210,		WMP_F_SERDES },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1621 	  "I210 Gigabit Ethernet (SGMII)",
   1622 	  WM_T_I210,		WMP_F_COPPER },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1625 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1626 	  WM_T_I210,		WMP_F_COPPER },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1629 	  "I211 Ethernet (COPPER)",
   1630 	  WM_T_I211,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1632 	  "I217 V Ethernet Connection",
   1633 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1635 	  "I217 LM Ethernet Connection",
   1636 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1638 	  "I218 V Ethernet Connection",
   1639 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1641 	  "I218 V Ethernet Connection",
   1642 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1644 	  "I218 V Ethernet Connection",
   1645 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1647 	  "I218 LM Ethernet Connection",
   1648 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1650 	  "I218 LM Ethernet Connection",
   1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1653 	  "I218 LM Ethernet Connection",
   1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1656 	  "I219 LM Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1659 	  "I219 LM (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1662 	  "I219 LM (3) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1665 	  "I219 LM (4) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1668 	  "I219 LM (5) Ethernet Connection",
   1669 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1671 	  "I219 LM (6) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1674 	  "I219 LM (7) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1677 	  "I219 LM (8) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1680 	  "I219 LM (9) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1683 	  "I219 LM (10) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1686 	  "I219 LM (11) Ethernet Connection",
   1687 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1689 	  "I219 LM (12) Ethernet Connection",
   1690 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1692 	  "I219 LM (13) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1695 	  "I219 LM (14) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1698 	  "I219 LM (15) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1701 	  "I219 LM (16) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1704 	  "I219 LM (17) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1707 	  "I219 LM (18) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1710 	  "I219 LM (19) Ethernet Connection",
   1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1713 	  "I219 V Ethernet Connection",
   1714 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1716 	  "I219 V (2) Ethernet Connection",
   1717 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1719 	  "I219 V (4) Ethernet Connection",
   1720 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1722 	  "I219 V (5) Ethernet Connection",
   1723 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1725 	  "I219 V (6) Ethernet Connection",
   1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1728 	  "I219 V (7) Ethernet Connection",
   1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1731 	  "I219 V (8) Ethernet Connection",
   1732 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1734 	  "I219 V (9) Ethernet Connection",
   1735 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1737 	  "I219 V (10) Ethernet Connection",
   1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1740 	  "I219 V (11) Ethernet Connection",
   1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1743 	  "I219 V (12) Ethernet Connection",
   1744 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1746 	  "I219 V (13) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1749 	  "I219 V (14) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1752 	  "I219 V (15) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1755 	  "I219 V (16) Ethernet Connection",
   1756 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1758 	  "I219 V (17) Ethernet Connection",
   1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1761 	  "I219 V (18) Ethernet Connection",
   1762 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1764 	  "I219 V (19) Ethernet Connection",
   1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1766 	{ 0,			0,
   1767 	  NULL,
   1768 	  0,			0 },
   1769 };
   1770 
   1771 /*
    1772  * Register read/write functions
    1773  * other than CSR_{READ|WRITE}().
   1774  */
   1775 
   1776 #if 0 /* Not currently used */
   1777 static inline uint32_t
   1778 wm_io_read(struct wm_softc *sc, int reg)
   1779 {
   1780 
   1781 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1782 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1783 }
   1784 #endif
   1785 
   1786 static inline void
   1787 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1788 {
   1789 
   1790 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1791 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1792 }
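
/*
 * Illustrative sketch (not compiled): the two writes above implement an
 * IOADDR/IODATA-style indirection window, so a register write through
 * I/O space looks like an ordinary register write from the caller's
 * side.  The helper name below is hypothetical.
 */
#if 0
static void
wm_io_reset_example(struct wm_softc *sc)
{

	/* Reset the MAC through the I/O window instead of CSR_WRITE(). */
	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
}
#endif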
   1793 
   1794 static inline void
   1795 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1796     uint32_t data)
   1797 {
   1798 	uint32_t regval;
   1799 	int i;
   1800 
   1801 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1802 
   1803 	CSR_WRITE(sc, reg, regval);
   1804 
   1805 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1806 		delay(5);
   1807 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1808 			break;
   1809 	}
   1810 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1811 		aprint_error("%s: WARNING:"
   1812 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1813 		    device_xname(sc->sc_dev), reg);
   1814 	}
   1815 }
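
/*
 * Illustrative usage of the above (the offset and data values are
 * hypothetical):
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
 *
 * composes (0x0c & SCTL_CTL_DATA_MASK) | (0x00 << SCTL_CTL_ADDR_SHIFT),
 * writes it to WMREG_SCTL and then polls up to SCTL_CTL_POLL_TIMEOUT
 * times for SCTL_CTL_READY.
 */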
   1816 
   1817 static inline void
   1818 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1819 {
   1820 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1821 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1822 }
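
/*
 * Worked example (illustrative): for v == 0x123456789, wm_set_dma_addr()
 * stores wa_low == htole32(0x23456789) and wa_high == htole32(0x1), i.e.
 * the 64-bit bus address split into little-endian 32-bit halves as the
 * chip expects.
 */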
   1823 
   1824 /*
   1825  * Descriptor sync/init functions.
   1826  */
   1827 static inline void
   1828 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1829 {
   1830 	struct wm_softc *sc = txq->txq_sc;
   1831 
   1832 	/* If it will wrap around, sync to the end of the ring. */
   1833 	if ((start + num) > WM_NTXDESC(txq)) {
   1834 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1835 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1836 		    (WM_NTXDESC(txq) - start), ops);
   1837 		num -= (WM_NTXDESC(txq) - start);
   1838 		start = 0;
   1839 	}
   1840 
   1841 	/* Now sync whatever is left. */
   1842 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1843 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1844 }
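
/*
 * Example (illustrative): with a 256-descriptor ring, a call such as
 * wm_cdtxsync(txq, 250, 10, ops) is split into two bus_dmamap_sync()
 * calls, one covering descriptors 250-255 and one covering 0-3.
 */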
   1845 
   1846 static inline void
   1847 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1848 {
   1849 	struct wm_softc *sc = rxq->rxq_sc;
   1850 
   1851 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1852 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1853 }
   1854 
   1855 static inline void
   1856 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1857 {
   1858 	struct wm_softc *sc = rxq->rxq_sc;
   1859 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1860 	struct mbuf *m = rxs->rxs_mbuf;
   1861 
   1862 	/*
   1863 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1864 	 * so that the payload after the Ethernet header is aligned
   1865 	 * to a 4-byte boundary.
    1866 	 *
   1867 	 * XXX BRAINDAMAGE ALERT!
   1868 	 * The stupid chip uses the same size for every buffer, which
   1869 	 * is set in the Receive Control register.  We are using the 2K
   1870 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1871 	 * reason, we can't "scoot" packets longer than the standard
   1872 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1873 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1874 	 * the upper layer copy the headers.
   1875 	 */
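	/*
	 * Worked example (illustrative, assuming 2048-byte clusters): a
	 * maximum-size 1518-byte frame plus the 2-byte tweak needs 1520
	 * bytes and fits; any frame longer than (2K - 2) bytes would not,
	 * which is why align_tweak is forced to 0 for such configurations
	 * on strict-alignment platforms.
	 */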
   1876 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1877 
   1878 	if (sc->sc_type == WM_T_82574) {
   1879 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1880 		rxd->erx_data.erxd_addr =
   1881 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1882 		rxd->erx_data.erxd_dd = 0;
   1883 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1884 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1885 
   1886 		rxd->nqrx_data.nrxd_paddr =
   1887 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1888 		/* Currently, split header is not supported. */
   1889 		rxd->nqrx_data.nrxd_haddr = 0;
   1890 	} else {
   1891 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1892 
   1893 		wm_set_dma_addr(&rxd->wrx_addr,
   1894 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1895 		rxd->wrx_len = 0;
   1896 		rxd->wrx_cksum = 0;
   1897 		rxd->wrx_status = 0;
   1898 		rxd->wrx_errors = 0;
   1899 		rxd->wrx_special = 0;
   1900 	}
   1901 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1902 
   1903 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1904 }
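
/*
 * Minimal sketch (not compiled) of how a receive-buffer refill path
 * might hand a fresh mbuf cluster back to the chip via wm_init_rxdesc().
 * The function name is hypothetical and error handling is abbreviated.
 */
#if 0
static int
wm_refill_rxbuf_example(struct wm_rxqueue *rxq, int idx)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf with an external cluster for the frame. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	rxs->rxs_mbuf = m;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Map it for DMA and publish the descriptor to the hardware. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	wm_init_rxdesc(rxq, idx);

	return 0;
}
#endif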
   1905 
   1906 /*
   1907  * Device driver interface functions and commonly used functions.
   1908  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1909  */
   1910 
   1911 /* Lookup supported device table */
   1912 static const struct wm_product *
   1913 wm_lookup(const struct pci_attach_args *pa)
   1914 {
   1915 	const struct wm_product *wmp;
   1916 
   1917 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1918 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1919 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1920 			return wmp;
   1921 	}
   1922 	return NULL;
   1923 }
   1924 
   1925 /* The match function (ca_match) */
   1926 static int
   1927 wm_match(device_t parent, cfdata_t cf, void *aux)
   1928 {
   1929 	struct pci_attach_args *pa = aux;
   1930 
   1931 	if (wm_lookup(pa) != NULL)
   1932 		return 1;
   1933 
   1934 	return 0;
   1935 }
   1936 
   1937 /* The attach function (ca_attach) */
   1938 static void
   1939 wm_attach(device_t parent, device_t self, void *aux)
   1940 {
   1941 	struct wm_softc *sc = device_private(self);
   1942 	struct pci_attach_args *pa = aux;
   1943 	prop_dictionary_t dict;
   1944 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1945 	pci_chipset_tag_t pc = pa->pa_pc;
   1946 	int counts[PCI_INTR_TYPE_SIZE];
   1947 	pci_intr_type_t max_type;
   1948 	const char *eetype, *xname;
   1949 	bus_space_tag_t memt;
   1950 	bus_space_handle_t memh;
   1951 	bus_size_t memsize;
   1952 	int memh_valid;
   1953 	int i, error;
   1954 	const struct wm_product *wmp;
   1955 	prop_data_t ea;
   1956 	prop_number_t pn;
   1957 	uint8_t enaddr[ETHER_ADDR_LEN];
   1958 	char buf[256];
   1959 	char wqname[MAXCOMLEN];
   1960 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1961 	pcireg_t preg, memtype;
   1962 	uint16_t eeprom_data, apme_mask;
   1963 	bool force_clear_smbi;
   1964 	uint32_t link_mode;
   1965 	uint32_t reg;
   1966 
   1967 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1968 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1969 #endif
   1970 	sc->sc_dev = self;
   1971 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   1972 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1973 	sc->sc_core_stopping = false;
   1974 
   1975 	wmp = wm_lookup(pa);
   1976 #ifdef DIAGNOSTIC
   1977 	if (wmp == NULL) {
   1978 		printf("\n");
   1979 		panic("wm_attach: impossible");
   1980 	}
   1981 #endif
   1982 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1983 
   1984 	sc->sc_pc = pa->pa_pc;
   1985 	sc->sc_pcitag = pa->pa_tag;
   1986 
   1987 	if (pci_dma64_available(pa)) {
   1988 		aprint_verbose(", 64-bit DMA");
   1989 		sc->sc_dmat = pa->pa_dmat64;
   1990 	} else {
   1991 		aprint_verbose(", 32-bit DMA");
   1992 		sc->sc_dmat = pa->pa_dmat;
   1993 	}
   1994 
   1995 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1996 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1997 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1998 
   1999 	sc->sc_type = wmp->wmp_type;
   2000 
   2001 	/* Set default function pointers */
   2002 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2003 	sc->phy.release = sc->nvm.release = wm_put_null;
   2004 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2005 
   2006 	if (sc->sc_type < WM_T_82543) {
   2007 		if (sc->sc_rev < 2) {
   2008 			aprint_error_dev(sc->sc_dev,
   2009 			    "i82542 must be at least rev. 2\n");
   2010 			return;
   2011 		}
   2012 		if (sc->sc_rev < 3)
   2013 			sc->sc_type = WM_T_82542_2_0;
   2014 	}
   2015 
   2016 	/*
   2017 	 * Disable MSI for Errata:
   2018 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2019 	 *
   2020 	 *  82544: Errata 25
   2021 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2022 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2023 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2024 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2025 	 *
   2026 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2027 	 *
   2028 	 *  82571 & 82572: Errata 63
   2029 	 */
   2030 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2031 	    || (sc->sc_type == WM_T_82572))
   2032 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2033 
   2034 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2035 	    || (sc->sc_type == WM_T_82580)
   2036 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2037 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2038 		sc->sc_flags |= WM_F_NEWQUEUE;
   2039 
   2040 	/* Set device properties (mactype) */
   2041 	dict = device_properties(sc->sc_dev);
   2042 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2043 
   2044 	/*
    2045 	 * Map the device.  All devices support memory-mapped access,
   2046 	 * and it is really required for normal operation.
   2047 	 */
   2048 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2049 	switch (memtype) {
   2050 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2051 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2052 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2053 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2054 		break;
   2055 	default:
   2056 		memh_valid = 0;
   2057 		break;
   2058 	}
   2059 
   2060 	if (memh_valid) {
   2061 		sc->sc_st = memt;
   2062 		sc->sc_sh = memh;
   2063 		sc->sc_ss = memsize;
   2064 	} else {
   2065 		aprint_error_dev(sc->sc_dev,
   2066 		    "unable to map device registers\n");
   2067 		return;
   2068 	}
   2069 
   2070 	/*
   2071 	 * In addition, i82544 and later support I/O mapped indirect
   2072 	 * register access.  It is not desirable (nor supported in
   2073 	 * this driver) to use it for normal operation, though it is
   2074 	 * required to work around bugs in some chip versions.
   2075 	 */
   2076 	switch (sc->sc_type) {
   2077 	case WM_T_82544:
   2078 	case WM_T_82541:
   2079 	case WM_T_82541_2:
   2080 	case WM_T_82547:
   2081 	case WM_T_82547_2:
   2082 		/* First we have to find the I/O BAR. */
   2083 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2084 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2085 			if (memtype == PCI_MAPREG_TYPE_IO)
   2086 				break;
   2087 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2088 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2089 				i += 4;	/* skip high bits, too */
   2090 		}
   2091 		if (i < PCI_MAPREG_END) {
   2092 			/*
    2093 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2094 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2095 			 * That's no problem, because the newer chips don't
    2096 			 * have this bug.
   2097 			 *
    2098 			 * The i8254x apparently doesn't respond when the
    2099 			 * I/O BAR is 0, which looks as if it hasn't been
    2100 			 * configured.
   2101 			 */
   2102 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2103 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2104 				aprint_error_dev(sc->sc_dev,
   2105 				    "WARNING: I/O BAR at zero.\n");
   2106 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2107 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2108 			    == 0) {
   2109 				sc->sc_flags |= WM_F_IOH_VALID;
   2110 			} else
   2111 				aprint_error_dev(sc->sc_dev,
   2112 				    "WARNING: unable to map I/O space\n");
   2113 		}
   2114 		break;
   2115 	default:
   2116 		break;
   2117 	}
   2118 
   2119 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2120 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2121 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2122 	if (sc->sc_type < WM_T_82542_2_1)
   2123 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2124 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2125 
   2126 	/* Power up chip */
   2127 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2128 	    && error != EOPNOTSUPP) {
   2129 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2130 		return;
   2131 	}
   2132 
   2133 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2134 	/*
    2135 	 * To save interrupt resources, don't use MSI-X if we can use
    2136 	 * only one queue.
   2137 	 */
   2138 	if (sc->sc_nqueues > 1) {
   2139 		max_type = PCI_INTR_TYPE_MSIX;
   2140 		/*
    2141 		 * The 82583 has an MSI-X capability in its PCI configuration
    2142 		 * space, but the device doesn't support it; at least the
    2143 		 * documentation doesn't say anything about MSI-X.
   2144 		 */
   2145 		counts[PCI_INTR_TYPE_MSIX]
   2146 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2147 	} else {
   2148 		max_type = PCI_INTR_TYPE_MSI;
   2149 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2150 	}
   2151 
   2152 	/* Allocation settings */
   2153 	counts[PCI_INTR_TYPE_MSI] = 1;
   2154 	counts[PCI_INTR_TYPE_INTX] = 1;
   2155 	/* overridden by disable flags */
   2156 	if (wm_disable_msi != 0) {
   2157 		counts[PCI_INTR_TYPE_MSI] = 0;
   2158 		if (wm_disable_msix != 0) {
   2159 			max_type = PCI_INTR_TYPE_INTX;
   2160 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2161 		}
   2162 	} else if (wm_disable_msix != 0) {
   2163 		max_type = PCI_INTR_TYPE_MSI;
   2164 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2165 	}
   2166 
   2167 alloc_retry:
   2168 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2169 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2170 		return;
   2171 	}
   2172 
   2173 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2174 		error = wm_setup_msix(sc);
   2175 		if (error) {
   2176 			pci_intr_release(pc, sc->sc_intrs,
   2177 			    counts[PCI_INTR_TYPE_MSIX]);
   2178 
   2179 			/* Setup for MSI: Disable MSI-X */
   2180 			max_type = PCI_INTR_TYPE_MSI;
   2181 			counts[PCI_INTR_TYPE_MSI] = 1;
   2182 			counts[PCI_INTR_TYPE_INTX] = 1;
   2183 			goto alloc_retry;
   2184 		}
   2185 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2186 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2187 		error = wm_setup_legacy(sc);
   2188 		if (error) {
   2189 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2190 			    counts[PCI_INTR_TYPE_MSI]);
   2191 
   2192 			/* The next try is for INTx: Disable MSI */
   2193 			max_type = PCI_INTR_TYPE_INTX;
   2194 			counts[PCI_INTR_TYPE_INTX] = 1;
   2195 			goto alloc_retry;
   2196 		}
   2197 	} else {
   2198 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2199 		error = wm_setup_legacy(sc);
   2200 		if (error) {
   2201 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2202 			    counts[PCI_INTR_TYPE_INTX]);
   2203 			return;
   2204 		}
   2205 	}
   2206 
   2207 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2208 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2209 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2210 	    WQ_PERCPU | WQ_MPSAFE);
   2211 	if (error) {
   2212 		aprint_error_dev(sc->sc_dev,
   2213 		    "unable to create TxRx workqueue\n");
   2214 		goto out;
   2215 	}
   2216 
   2217 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2218 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2219 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2220 	    WQ_MPSAFE);
   2221 	if (error) {
   2222 		workqueue_destroy(sc->sc_queue_wq);
   2223 		aprint_error_dev(sc->sc_dev,
   2224 		    "unable to create reset workqueue\n");
   2225 		goto out;
   2226 	}
   2227 
   2228 	/*
   2229 	 * Check the function ID (unit number of the chip).
   2230 	 */
   2231 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2232 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2233 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2234 	    || (sc->sc_type == WM_T_82580)
   2235 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2236 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2237 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2238 	else
   2239 		sc->sc_funcid = 0;
   2240 
   2241 	/*
   2242 	 * Determine a few things about the bus we're connected to.
   2243 	 */
   2244 	if (sc->sc_type < WM_T_82543) {
   2245 		/* We don't really know the bus characteristics here. */
   2246 		sc->sc_bus_speed = 33;
   2247 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2248 		/*
    2249 		 * CSA (Communication Streaming Architecture) is about as
    2250 		 * fast as a 32-bit, 66MHz PCI bus.
   2251 		 */
   2252 		sc->sc_flags |= WM_F_CSA;
   2253 		sc->sc_bus_speed = 66;
   2254 		aprint_verbose_dev(sc->sc_dev,
   2255 		    "Communication Streaming Architecture\n");
   2256 		if (sc->sc_type == WM_T_82547) {
   2257 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2258 			callout_setfunc(&sc->sc_txfifo_ch,
   2259 			    wm_82547_txfifo_stall, sc);
   2260 			aprint_verbose_dev(sc->sc_dev,
   2261 			    "using 82547 Tx FIFO stall work-around\n");
   2262 		}
   2263 	} else if (sc->sc_type >= WM_T_82571) {
   2264 		sc->sc_flags |= WM_F_PCIE;
   2265 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2266 		    && (sc->sc_type != WM_T_ICH10)
   2267 		    && (sc->sc_type != WM_T_PCH)
   2268 		    && (sc->sc_type != WM_T_PCH2)
   2269 		    && (sc->sc_type != WM_T_PCH_LPT)
   2270 		    && (sc->sc_type != WM_T_PCH_SPT)
   2271 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2272 			/* ICH* and PCH* have no PCIe capability registers */
   2273 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2274 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2275 				NULL) == 0)
   2276 				aprint_error_dev(sc->sc_dev,
   2277 				    "unable to find PCIe capability\n");
   2278 		}
   2279 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2280 	} else {
   2281 		reg = CSR_READ(sc, WMREG_STATUS);
   2282 		if (reg & STATUS_BUS64)
   2283 			sc->sc_flags |= WM_F_BUS64;
   2284 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2285 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2286 
   2287 			sc->sc_flags |= WM_F_PCIX;
   2288 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2289 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2290 				aprint_error_dev(sc->sc_dev,
   2291 				    "unable to find PCIX capability\n");
   2292 			else if (sc->sc_type != WM_T_82545_3 &&
   2293 			    sc->sc_type != WM_T_82546_3) {
   2294 				/*
   2295 				 * Work around a problem caused by the BIOS
   2296 				 * setting the max memory read byte count
   2297 				 * incorrectly.
   2298 				 */
   2299 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2300 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2301 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2302 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2303 
   2304 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2305 				    PCIX_CMD_BYTECNT_SHIFT;
   2306 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2307 				    PCIX_STATUS_MAXB_SHIFT;
   2308 				if (bytecnt > maxb) {
   2309 					aprint_verbose_dev(sc->sc_dev,
   2310 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2311 					    512 << bytecnt, 512 << maxb);
   2312 					pcix_cmd = (pcix_cmd &
   2313 					    ~PCIX_CMD_BYTECNT_MASK) |
   2314 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2315 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2316 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2317 					    pcix_cmd);
   2318 				}
   2319 			}
   2320 		}
   2321 		/*
   2322 		 * The quad port adapter is special; it has a PCIX-PCIX
   2323 		 * bridge on the board, and can run the secondary bus at
   2324 		 * a higher speed.
   2325 		 */
   2326 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2327 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2328 								      : 66;
   2329 		} else if (sc->sc_flags & WM_F_PCIX) {
   2330 			switch (reg & STATUS_PCIXSPD_MASK) {
   2331 			case STATUS_PCIXSPD_50_66:
   2332 				sc->sc_bus_speed = 66;
   2333 				break;
   2334 			case STATUS_PCIXSPD_66_100:
   2335 				sc->sc_bus_speed = 100;
   2336 				break;
   2337 			case STATUS_PCIXSPD_100_133:
   2338 				sc->sc_bus_speed = 133;
   2339 				break;
   2340 			default:
   2341 				aprint_error_dev(sc->sc_dev,
   2342 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2343 				    reg & STATUS_PCIXSPD_MASK);
   2344 				sc->sc_bus_speed = 66;
   2345 				break;
   2346 			}
   2347 		} else
   2348 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2349 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2350 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2351 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2352 	}
   2353 
   2354 	/* clear interesting stat counters */
   2355 	CSR_READ(sc, WMREG_COLC);
   2356 	CSR_READ(sc, WMREG_RXERRC);
   2357 
   2358 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2359 	    || (sc->sc_type >= WM_T_ICH8))
   2360 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2361 	if (sc->sc_type >= WM_T_ICH8)
   2362 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2363 
   2364 	/* Set PHY, NVM mutex related stuff */
   2365 	switch (sc->sc_type) {
   2366 	case WM_T_82542_2_0:
   2367 	case WM_T_82542_2_1:
   2368 	case WM_T_82543:
   2369 	case WM_T_82544:
   2370 		/* Microwire */
   2371 		sc->nvm.read = wm_nvm_read_uwire;
   2372 		sc->sc_nvm_wordsize = 64;
   2373 		sc->sc_nvm_addrbits = 6;
   2374 		break;
   2375 	case WM_T_82540:
   2376 	case WM_T_82545:
   2377 	case WM_T_82545_3:
   2378 	case WM_T_82546:
   2379 	case WM_T_82546_3:
   2380 		/* Microwire */
   2381 		sc->nvm.read = wm_nvm_read_uwire;
   2382 		reg = CSR_READ(sc, WMREG_EECD);
   2383 		if (reg & EECD_EE_SIZE) {
   2384 			sc->sc_nvm_wordsize = 256;
   2385 			sc->sc_nvm_addrbits = 8;
   2386 		} else {
   2387 			sc->sc_nvm_wordsize = 64;
   2388 			sc->sc_nvm_addrbits = 6;
   2389 		}
   2390 		sc->sc_flags |= WM_F_LOCK_EECD;
   2391 		sc->nvm.acquire = wm_get_eecd;
   2392 		sc->nvm.release = wm_put_eecd;
   2393 		break;
   2394 	case WM_T_82541:
   2395 	case WM_T_82541_2:
   2396 	case WM_T_82547:
   2397 	case WM_T_82547_2:
   2398 		reg = CSR_READ(sc, WMREG_EECD);
   2399 		/*
    2400 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2401 		 * the 8254[17], so set the flags and functions before calling it.
   2402 		 */
   2403 		sc->sc_flags |= WM_F_LOCK_EECD;
   2404 		sc->nvm.acquire = wm_get_eecd;
   2405 		sc->nvm.release = wm_put_eecd;
   2406 		if (reg & EECD_EE_TYPE) {
   2407 			/* SPI */
   2408 			sc->nvm.read = wm_nvm_read_spi;
   2409 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2410 			wm_nvm_set_addrbits_size_eecd(sc);
   2411 		} else {
   2412 			/* Microwire */
   2413 			sc->nvm.read = wm_nvm_read_uwire;
   2414 			if ((reg & EECD_EE_ABITS) != 0) {
   2415 				sc->sc_nvm_wordsize = 256;
   2416 				sc->sc_nvm_addrbits = 8;
   2417 			} else {
   2418 				sc->sc_nvm_wordsize = 64;
   2419 				sc->sc_nvm_addrbits = 6;
   2420 			}
   2421 		}
   2422 		break;
   2423 	case WM_T_82571:
   2424 	case WM_T_82572:
   2425 		/* SPI */
   2426 		sc->nvm.read = wm_nvm_read_eerd;
    2427 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2428 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2429 		wm_nvm_set_addrbits_size_eecd(sc);
   2430 		sc->phy.acquire = wm_get_swsm_semaphore;
   2431 		sc->phy.release = wm_put_swsm_semaphore;
   2432 		sc->nvm.acquire = wm_get_nvm_82571;
   2433 		sc->nvm.release = wm_put_nvm_82571;
   2434 		break;
   2435 	case WM_T_82573:
   2436 	case WM_T_82574:
   2437 	case WM_T_82583:
   2438 		sc->nvm.read = wm_nvm_read_eerd;
    2439 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2440 		if (sc->sc_type == WM_T_82573) {
   2441 			sc->phy.acquire = wm_get_swsm_semaphore;
   2442 			sc->phy.release = wm_put_swsm_semaphore;
   2443 			sc->nvm.acquire = wm_get_nvm_82571;
   2444 			sc->nvm.release = wm_put_nvm_82571;
   2445 		} else {
   2446 			/* Both PHY and NVM use the same semaphore. */
   2447 			sc->phy.acquire = sc->nvm.acquire
   2448 			    = wm_get_swfwhw_semaphore;
   2449 			sc->phy.release = sc->nvm.release
   2450 			    = wm_put_swfwhw_semaphore;
   2451 		}
   2452 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2453 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2454 			sc->sc_nvm_wordsize = 2048;
   2455 		} else {
   2456 			/* SPI */
   2457 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2458 			wm_nvm_set_addrbits_size_eecd(sc);
   2459 		}
   2460 		break;
   2461 	case WM_T_82575:
   2462 	case WM_T_82576:
   2463 	case WM_T_82580:
   2464 	case WM_T_I350:
   2465 	case WM_T_I354:
   2466 	case WM_T_80003:
   2467 		/* SPI */
   2468 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2469 		wm_nvm_set_addrbits_size_eecd(sc);
   2470 		if ((sc->sc_type == WM_T_80003)
   2471 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2472 			sc->nvm.read = wm_nvm_read_eerd;
   2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2474 		} else {
   2475 			sc->nvm.read = wm_nvm_read_spi;
   2476 			sc->sc_flags |= WM_F_LOCK_EECD;
   2477 		}
   2478 		sc->phy.acquire = wm_get_phy_82575;
   2479 		sc->phy.release = wm_put_phy_82575;
   2480 		sc->nvm.acquire = wm_get_nvm_80003;
   2481 		sc->nvm.release = wm_put_nvm_80003;
   2482 		break;
   2483 	case WM_T_ICH8:
   2484 	case WM_T_ICH9:
   2485 	case WM_T_ICH10:
   2486 	case WM_T_PCH:
   2487 	case WM_T_PCH2:
   2488 	case WM_T_PCH_LPT:
   2489 		sc->nvm.read = wm_nvm_read_ich8;
   2490 		/* FLASH */
   2491 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2492 		sc->sc_nvm_wordsize = 2048;
   2493 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2494 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2495 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2496 			aprint_error_dev(sc->sc_dev,
   2497 			    "can't map FLASH registers\n");
   2498 			goto out;
   2499 		}
   2500 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2501 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2502 		    ICH_FLASH_SECTOR_SIZE;
   2503 		sc->sc_ich8_flash_bank_size =
   2504 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2505 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2506 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2507 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
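		/*
		 * Worked example (illustrative, assuming 4 KiB flash
		 * sectors): GFPREG == 0x001f0000 describes a region with
		 * base 0 and (0x1f + 1) == 32 sectors, i.e. 128 KiB in
		 * total; halved for the two banks and converted from
		 * bytes to 16-bit words, that is 32768 words per bank.
		 */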
   2508 		sc->sc_flashreg_offset = 0;
   2509 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2510 		sc->phy.release = wm_put_swflag_ich8lan;
   2511 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2512 		sc->nvm.release = wm_put_nvm_ich8lan;
   2513 		break;
   2514 	case WM_T_PCH_SPT:
   2515 	case WM_T_PCH_CNP:
   2516 		sc->nvm.read = wm_nvm_read_spt;
   2517 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2518 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2519 		sc->sc_flasht = sc->sc_st;
   2520 		sc->sc_flashh = sc->sc_sh;
   2521 		sc->sc_ich8_flash_base = 0;
   2522 		sc->sc_nvm_wordsize =
   2523 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2524 		    * NVM_SIZE_MULTIPLIER;
    2525 		/* That's the size in bytes; we want it in words */
   2526 		sc->sc_nvm_wordsize /= 2;
   2527 		/* Assume 2 banks */
   2528 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
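		/*
		 * Worked example (illustrative, assuming the multiplier is
		 * 4 KiB): a STRAP NVM-size field of 7 gives (7 + 1) * 4096
		 * == 32768 bytes == 16384 words, i.e. 8192 words per bank.
		 */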
   2529 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2530 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2531 		sc->phy.release = wm_put_swflag_ich8lan;
   2532 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2533 		sc->nvm.release = wm_put_nvm_ich8lan;
   2534 		break;
   2535 	case WM_T_I210:
   2536 	case WM_T_I211:
    2537 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2538 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2539 		if (wm_nvm_flash_presence_i210(sc)) {
   2540 			sc->nvm.read = wm_nvm_read_eerd;
   2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2542 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2543 			wm_nvm_set_addrbits_size_eecd(sc);
   2544 		} else {
   2545 			sc->nvm.read = wm_nvm_read_invm;
   2546 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2547 			sc->sc_nvm_wordsize = INVM_SIZE;
   2548 		}
   2549 		sc->phy.acquire = wm_get_phy_82575;
   2550 		sc->phy.release = wm_put_phy_82575;
   2551 		sc->nvm.acquire = wm_get_nvm_80003;
   2552 		sc->nvm.release = wm_put_nvm_80003;
   2553 		break;
   2554 	default:
   2555 		break;
   2556 	}
   2557 
   2558 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2559 	switch (sc->sc_type) {
   2560 	case WM_T_82571:
   2561 	case WM_T_82572:
   2562 		reg = CSR_READ(sc, WMREG_SWSM2);
   2563 		if ((reg & SWSM2_LOCK) == 0) {
   2564 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2565 			force_clear_smbi = true;
   2566 		} else
   2567 			force_clear_smbi = false;
   2568 		break;
   2569 	case WM_T_82573:
   2570 	case WM_T_82574:
   2571 	case WM_T_82583:
   2572 		force_clear_smbi = true;
   2573 		break;
   2574 	default:
   2575 		force_clear_smbi = false;
   2576 		break;
   2577 	}
   2578 	if (force_clear_smbi) {
   2579 		reg = CSR_READ(sc, WMREG_SWSM);
   2580 		if ((reg & SWSM_SMBI) != 0)
   2581 			aprint_error_dev(sc->sc_dev,
   2582 			    "Please update the Bootagent\n");
   2583 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2584 	}
   2585 
   2586 	/*
    2587 	 * Defer printing the EEPROM type until after verifying the checksum.
   2588 	 * This allows the EEPROM type to be printed correctly in the case
   2589 	 * that no EEPROM is attached.
   2590 	 */
   2591 	/*
   2592 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2593 	 * this for later, so we can fail future reads from the EEPROM.
   2594 	 */
   2595 	if (wm_nvm_validate_checksum(sc)) {
   2596 		/*
    2597 		 * Read it once more, because some PCIe parts fail the
    2598 		 * first check due to the link being in a sleep state.
   2599 		 */
   2600 		if (wm_nvm_validate_checksum(sc))
   2601 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2602 	}
   2603 
   2604 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2605 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2606 	else {
   2607 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2608 		    sc->sc_nvm_wordsize);
   2609 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2610 			aprint_verbose("iNVM");
   2611 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2612 			aprint_verbose("FLASH(HW)");
   2613 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2614 			aprint_verbose("FLASH");
   2615 		else {
   2616 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2617 				eetype = "SPI";
   2618 			else
   2619 				eetype = "MicroWire";
   2620 			aprint_verbose("(%d address bits) %s EEPROM",
   2621 			    sc->sc_nvm_addrbits, eetype);
   2622 		}
   2623 	}
   2624 	wm_nvm_version(sc);
   2625 	aprint_verbose("\n");
   2626 
   2627 	/*
    2628 	 * XXX The first call to wm_gmii_setup_phytype(); the result might be
   2629 	 * incorrect.
   2630 	 */
   2631 	wm_gmii_setup_phytype(sc, 0, 0);
   2632 
   2633 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2634 	switch (sc->sc_type) {
   2635 	case WM_T_ICH8:
   2636 	case WM_T_ICH9:
   2637 	case WM_T_ICH10:
   2638 	case WM_T_PCH:
   2639 	case WM_T_PCH2:
   2640 	case WM_T_PCH_LPT:
   2641 	case WM_T_PCH_SPT:
   2642 	case WM_T_PCH_CNP:
   2643 		apme_mask = WUC_APME;
   2644 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2645 		if ((eeprom_data & apme_mask) != 0)
   2646 			sc->sc_flags |= WM_F_WOL;
   2647 		break;
   2648 	default:
   2649 		break;
   2650 	}
   2651 
   2652 	/* Reset the chip to a known state. */
   2653 	wm_reset(sc);
   2654 
   2655 	/*
   2656 	 * Check for I21[01] PLL workaround.
   2657 	 *
   2658 	 * Three cases:
   2659 	 * a) Chip is I211.
   2660 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2661 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2662 	 */
   2663 	if (sc->sc_type == WM_T_I211)
   2664 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2665 	if (sc->sc_type == WM_T_I210) {
   2666 		if (!wm_nvm_flash_presence_i210(sc))
   2667 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2668 		else if ((sc->sc_nvm_ver_major < 3)
   2669 		    || ((sc->sc_nvm_ver_major == 3)
   2670 			&& (sc->sc_nvm_ver_minor < 25))) {
   2671 			aprint_verbose_dev(sc->sc_dev,
   2672 			    "ROM image version %d.%d is older than 3.25\n",
   2673 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2674 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2675 		}
   2676 	}
   2677 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2678 		wm_pll_workaround_i210(sc);
   2679 
   2680 	wm_get_wakeup(sc);
   2681 
   2682 	/* Non-AMT based hardware can now take control from firmware */
   2683 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2684 		wm_get_hw_control(sc);
   2685 
   2686 	/*
   2687 	 * Read the Ethernet address from the EEPROM, if not first found
   2688 	 * in device properties.
   2689 	 */
   2690 	ea = prop_dictionary_get(dict, "mac-address");
   2691 	if (ea != NULL) {
   2692 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2693 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2694 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2695 	} else {
   2696 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2697 			aprint_error_dev(sc->sc_dev,
   2698 			    "unable to read Ethernet address\n");
   2699 			goto out;
   2700 		}
   2701 	}
   2702 
   2703 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2704 	    ether_sprintf(enaddr));
   2705 
   2706 	/*
   2707 	 * Read the config info from the EEPROM, and set up various
   2708 	 * bits in the control registers based on their contents.
   2709 	 */
   2710 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2711 	if (pn != NULL) {
   2712 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2713 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2714 	} else {
   2715 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2716 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2717 			goto out;
   2718 		}
   2719 	}
   2720 
   2721 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2722 	if (pn != NULL) {
   2723 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2724 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2725 	} else {
   2726 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2727 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2728 			goto out;
   2729 		}
   2730 	}
   2731 
   2732 	/* check for WM_F_WOL */
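         	/*
         	 * Each chip family keeps the APM (wake) enable bit in a
         	 * different NVM word; pick the word and the mask here and
         	 * test them together after the switch.
         	 */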
   2733 	switch (sc->sc_type) {
   2734 	case WM_T_82542_2_0:
   2735 	case WM_T_82542_2_1:
   2736 	case WM_T_82543:
   2737 		/* dummy? */
   2738 		eeprom_data = 0;
   2739 		apme_mask = NVM_CFG3_APME;
   2740 		break;
   2741 	case WM_T_82544:
   2742 		apme_mask = NVM_CFG2_82544_APM_EN;
   2743 		eeprom_data = cfg2;
   2744 		break;
   2745 	case WM_T_82546:
   2746 	case WM_T_82546_3:
   2747 	case WM_T_82571:
   2748 	case WM_T_82572:
   2749 	case WM_T_82573:
   2750 	case WM_T_82574:
   2751 	case WM_T_82583:
   2752 	case WM_T_80003:
   2753 	case WM_T_82575:
   2754 	case WM_T_82576:
   2755 		apme_mask = NVM_CFG3_APME;
   2756 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2757 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2758 		break;
   2759 	case WM_T_82580:
   2760 	case WM_T_I350:
   2761 	case WM_T_I354:
   2762 	case WM_T_I210:
   2763 	case WM_T_I211:
   2764 		apme_mask = NVM_CFG3_APME;
   2765 		wm_nvm_read(sc,
   2766 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2767 		    1, &eeprom_data);
   2768 		break;
   2769 	case WM_T_ICH8:
   2770 	case WM_T_ICH9:
   2771 	case WM_T_ICH10:
   2772 	case WM_T_PCH:
   2773 	case WM_T_PCH2:
   2774 	case WM_T_PCH_LPT:
   2775 	case WM_T_PCH_SPT:
   2776 	case WM_T_PCH_CNP:
    2777 		/* Already checked before wm_reset() */
   2778 		apme_mask = eeprom_data = 0;
   2779 		break;
   2780 	default: /* XXX 82540 */
   2781 		apme_mask = NVM_CFG3_APME;
   2782 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2783 		break;
   2784 	}
   2785 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2786 	if ((eeprom_data & apme_mask) != 0)
   2787 		sc->sc_flags |= WM_F_WOL;
   2788 
   2789 	/*
    2790 	 * We have the EEPROM settings; now apply the special cases
    2791 	 * where the EEPROM may be wrong or the board won't support
    2792 	 * wake on LAN on a particular port.
   2793 	 */
   2794 	switch (sc->sc_pcidevid) {
   2795 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2796 		sc->sc_flags &= ~WM_F_WOL;
   2797 		break;
   2798 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2799 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2800 		/* Wake events are only supported on port A of dual-fiber
    2801 		 * adapters, regardless of the EEPROM setting */
   2802 		if (sc->sc_funcid == 1)
   2803 			sc->sc_flags &= ~WM_F_WOL;
   2804 		break;
   2805 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2806 		/* If quad port adapter, disable WoL on all but port A */
   2807 		if (sc->sc_funcid != 0)
   2808 			sc->sc_flags &= ~WM_F_WOL;
   2809 		break;
   2810 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2811 		/* Wake events are only supported on port A of dual-fiber
    2812 		 * adapters, regardless of the EEPROM setting */
   2813 		if (sc->sc_funcid == 1)
   2814 			sc->sc_flags &= ~WM_F_WOL;
   2815 		break;
   2816 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2817 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2818 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2819 		/* If quad port adapter, disable WoL on all but port A */
   2820 		if (sc->sc_funcid != 0)
   2821 			sc->sc_flags &= ~WM_F_WOL;
   2822 		break;
   2823 	}
   2824 
   2825 	if (sc->sc_type >= WM_T_82575) {
   2826 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2827 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2828 			    nvmword);
   2829 			if ((sc->sc_type == WM_T_82575) ||
   2830 			    (sc->sc_type == WM_T_82576)) {
   2831 				/* Check NVM for autonegotiation */
   2832 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2833 				    != 0)
   2834 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2835 			}
   2836 			if ((sc->sc_type == WM_T_82575) ||
   2837 			    (sc->sc_type == WM_T_I350)) {
   2838 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2839 					sc->sc_flags |= WM_F_MAS;
   2840 			}
   2841 		}
   2842 	}
   2843 
   2844 	/*
    2845 	 * XXX Some multiple-port cards need special handling to
    2846 	 * disable a particular port.
   2847 	 */
   2848 
   2849 	if (sc->sc_type >= WM_T_82544) {
   2850 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2851 		if (pn != NULL) {
   2852 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2853 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2854 		} else {
   2855 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2856 				aprint_error_dev(sc->sc_dev,
   2857 				    "unable to read SWDPIN\n");
   2858 				goto out;
   2859 			}
   2860 		}
   2861 	}
   2862 
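         	/* ILOS inverts the loss-of-signal input polarity. */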
   2863 	if (cfg1 & NVM_CFG1_ILOS)
   2864 		sc->sc_ctrl |= CTRL_ILOS;
   2865 
   2866 	/*
   2867 	 * XXX
    2868 	 * This code isn't correct for newer chips because pins 2 and 3
    2869 	 * are located at different positions there. Check all of the
    2870 	 * datasheets.
    2871 	 * Until that is resolved, only apply this to chips up to the 82580.
   2872 	 */
   2873 	if (sc->sc_type <= WM_T_82580) {
   2874 		if (sc->sc_type >= WM_T_82544) {
   2875 			sc->sc_ctrl |=
   2876 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2877 			    CTRL_SWDPIO_SHIFT;
   2878 			sc->sc_ctrl |=
   2879 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2880 			    CTRL_SWDPINS_SHIFT;
   2881 		} else {
   2882 			sc->sc_ctrl |=
   2883 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2884 			    CTRL_SWDPIO_SHIFT;
   2885 		}
   2886 	}
   2887 
   2888 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2889 		wm_nvm_read(sc,
   2890 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2891 		    1, &nvmword);
   2892 		if (nvmword & NVM_CFG3_ILOS)
   2893 			sc->sc_ctrl |= CTRL_ILOS;
   2894 	}
   2895 
   2896 #if 0
   2897 	if (sc->sc_type >= WM_T_82544) {
   2898 		if (cfg1 & NVM_CFG1_IPS0)
   2899 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2900 		if (cfg1 & NVM_CFG1_IPS1)
   2901 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2902 		sc->sc_ctrl_ext |=
   2903 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2904 		    CTRL_EXT_SWDPIO_SHIFT;
   2905 		sc->sc_ctrl_ext |=
   2906 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2907 		    CTRL_EXT_SWDPINS_SHIFT;
   2908 	} else {
   2909 		sc->sc_ctrl_ext |=
   2910 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2911 		    CTRL_EXT_SWDPIO_SHIFT;
   2912 	}
   2913 #endif
   2914 
   2915 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2916 #if 0
   2917 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2918 #endif
   2919 
   2920 	if (sc->sc_type == WM_T_PCH) {
   2921 		uint16_t val;
   2922 
   2923 		/* Save the NVM K1 bit setting */
   2924 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2925 
   2926 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2927 			sc->sc_nvm_k1_enabled = 1;
   2928 		else
   2929 			sc->sc_nvm_k1_enabled = 0;
   2930 	}
   2931 
    2932 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2933 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2934 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2935 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2936 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2937 	    || sc->sc_type == WM_T_82573
   2938 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2939 		/* Copper only */
   2940 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2941 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2942 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2943 	    || (sc->sc_type == WM_T_I211)) {
   2944 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2945 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2946 		switch (link_mode) {
   2947 		case CTRL_EXT_LINK_MODE_1000KX:
   2948 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2949 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2950 			break;
   2951 		case CTRL_EXT_LINK_MODE_SGMII:
   2952 			if (wm_sgmii_uses_mdio(sc)) {
   2953 				aprint_normal_dev(sc->sc_dev,
   2954 				    "SGMII(MDIO)\n");
   2955 				sc->sc_flags |= WM_F_SGMII;
   2956 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2957 				break;
   2958 			}
   2959 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2960 			/*FALLTHROUGH*/
   2961 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2962 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2963 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2964 				if (link_mode
   2965 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2966 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2967 					sc->sc_flags |= WM_F_SGMII;
   2968 					aprint_verbose_dev(sc->sc_dev,
   2969 					    "SGMII\n");
   2970 				} else {
   2971 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2972 					aprint_verbose_dev(sc->sc_dev,
   2973 					    "SERDES\n");
   2974 				}
   2975 				break;
   2976 			}
   2977 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2978 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2979 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2980 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2981 				sc->sc_flags |= WM_F_SGMII;
   2982 			}
   2983 			/* Do not change link mode for 100BaseFX */
   2984 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2985 				break;
   2986 
   2987 			/* Change current link mode setting */
   2988 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2989 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2990 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2991 			else
   2992 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2993 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2994 			break;
   2995 		case CTRL_EXT_LINK_MODE_GMII:
   2996 		default:
   2997 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2998 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2999 			break;
   3000 		}
   3001 
   3003 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   3004 			reg |= CTRL_EXT_I2C_ENA;
   3005 		else
   3006 			reg &= ~CTRL_EXT_I2C_ENA;
   3007 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3008 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3009 			if (!wm_sgmii_uses_mdio(sc))
   3010 				wm_gmii_setup_phytype(sc, 0, 0);
   3011 			wm_reset_mdicnfg_82580(sc);
   3012 		}
   3013 	} else if (sc->sc_type < WM_T_82543 ||
   3014 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3015 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3016 			aprint_error_dev(sc->sc_dev,
   3017 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3018 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3019 		}
   3020 	} else {
   3021 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3022 			aprint_error_dev(sc->sc_dev,
   3023 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3024 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3025 		}
   3026 	}
   3027 
   3028 	if (sc->sc_type >= WM_T_PCH2)
   3029 		sc->sc_flags |= WM_F_EEE;
   3030 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3031 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3032 		/* XXX: Need special handling for I354. (not yet) */
   3033 		if (sc->sc_type != WM_T_I354)
   3034 			sc->sc_flags |= WM_F_EEE;
   3035 	}
   3036 
   3037 	/*
   3038 	 * The I350 has a bug where it always strips the CRC whether
   3039 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3040 	 */
   3041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3042 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3043 		sc->sc_flags |= WM_F_CRC_STRIP;
   3044 
   3045 	/* Set device properties (macflags) */
   3046 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3047 
   3048 	if (sc->sc_flags != 0) {
   3049 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3050 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3051 	}
   3052 
   3053 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3054 
   3055 	/* Initialize the media structures accordingly. */
   3056 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3057 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3058 	else
   3059 		wm_tbi_mediainit(sc); /* All others */
   3060 
   3061 	ifp = &sc->sc_ethercom.ec_if;
   3062 	xname = device_xname(sc->sc_dev);
   3063 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3064 	ifp->if_softc = sc;
   3065 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3066 	ifp->if_extflags = IFEF_MPSAFE;
   3067 	ifp->if_ioctl = wm_ioctl;
   3068 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3069 		ifp->if_start = wm_nq_start;
   3070 		/*
   3071 		 * When the number of CPUs is one and the controller can use
    3072 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3073 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3074 		 * the other for link status changes.
   3075 		 * In this situation, wm_nq_transmit() is disadvantageous
   3076 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3077 		 */
   3078 		if (wm_is_using_multiqueue(sc))
   3079 			ifp->if_transmit = wm_nq_transmit;
   3080 	} else {
   3081 		ifp->if_start = wm_start;
   3082 		/*
   3083 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3084 		 * described above.
   3085 		 */
   3086 		if (wm_is_using_multiqueue(sc))
   3087 			ifp->if_transmit = wm_transmit;
   3088 	}
    3089 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3090 	ifp->if_init = wm_init;
   3091 	ifp->if_stop = wm_stop;
   3092 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3093 	IFQ_SET_READY(&ifp->if_snd);
   3094 
   3095 	/* Check for jumbo frame */
   3096 	switch (sc->sc_type) {
   3097 	case WM_T_82573:
   3098 		/* XXX limited to 9234 if ASPM is disabled */
   3099 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3100 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3101 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3102 		break;
   3103 	case WM_T_82571:
   3104 	case WM_T_82572:
   3105 	case WM_T_82574:
   3106 	case WM_T_82583:
   3107 	case WM_T_82575:
   3108 	case WM_T_82576:
   3109 	case WM_T_82580:
   3110 	case WM_T_I350:
   3111 	case WM_T_I354:
   3112 	case WM_T_I210:
   3113 	case WM_T_I211:
   3114 	case WM_T_80003:
   3115 	case WM_T_ICH9:
   3116 	case WM_T_ICH10:
   3117 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3118 	case WM_T_PCH_LPT:
   3119 	case WM_T_PCH_SPT:
   3120 	case WM_T_PCH_CNP:
   3121 		/* XXX limited to 9234 */
   3122 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3123 		break;
   3124 	case WM_T_PCH:
   3125 		/* XXX limited to 4096 */
   3126 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3127 		break;
   3128 	case WM_T_82542_2_0:
   3129 	case WM_T_82542_2_1:
   3130 	case WM_T_ICH8:
   3131 		/* No support for jumbo frame */
   3132 		break;
   3133 	default:
   3134 		/* ETHER_MAX_LEN_JUMBO */
   3135 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3136 		break;
   3137 	}
   3138 
    3139 	/* If we're an i82543 or greater, we can support VLANs. */
   3140 	if (sc->sc_type >= WM_T_82543) {
   3141 		sc->sc_ethercom.ec_capabilities |=
   3142 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3143 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3144 	}
   3145 
   3146 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3147 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3148 
   3149 	/*
   3150 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3151 	 * on i82543 and later.
   3152 	 */
   3153 	if (sc->sc_type >= WM_T_82543) {
   3154 		ifp->if_capabilities |=
   3155 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3156 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3157 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3158 		    IFCAP_CSUM_TCPv6_Tx |
   3159 		    IFCAP_CSUM_UDPv6_Tx;
   3160 	}
   3161 
   3162 	/*
   3163 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3164 	 *
   3165 	 *	82541GI (8086:1076) ... no
   3166 	 *	82572EI (8086:10b9) ... yes
   3167 	 */
   3168 	if (sc->sc_type >= WM_T_82571) {
   3169 		ifp->if_capabilities |=
   3170 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3171 	}
   3172 
   3173 	/*
    3174 	 * If we're an i82544 or greater (except i82547), we can do
   3175 	 * TCP segmentation offload.
   3176 	 */
   3177 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3178 		ifp->if_capabilities |= IFCAP_TSOv4;
   3179 
   3180 	if (sc->sc_type >= WM_T_82571)
   3181 		ifp->if_capabilities |= IFCAP_TSOv6;
   3182 
   3183 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3184 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3185 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3186 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3187 
   3188 	/* Attach the interface. */
   3189 	if_initialize(ifp);
   3190 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3191 	ether_ifattach(ifp, enaddr);
   3192 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3193 	if_register(ifp);
   3194 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3195 	    RND_FLAG_DEFAULT);
   3196 
   3197 #ifdef WM_EVENT_COUNTERS
   3198 	/* Attach event counters. */
   3199 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3200 	    NULL, xname, "linkintr");
   3201 
   3202 	if (sc->sc_type >= WM_T_82542_2_1) {
   3203 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3204 		    NULL, xname, "tx_xoff");
   3205 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3206 		    NULL, xname, "tx_xon");
   3207 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3208 		    NULL, xname, "rx_xoff");
   3209 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3210 		    NULL, xname, "rx_xon");
   3211 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3212 		    NULL, xname, "rx_macctl");
   3213 	}
   3214 
   3215 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3216 	    NULL, xname, "CRC Error");
   3217 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3218 	    NULL, xname, "Symbol Error");
   3219 
   3220 	if (sc->sc_type >= WM_T_82543) {
   3221 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3222 		    NULL, xname, "Alignment Error");
   3223 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3224 		    NULL, xname, "Receive Error");
   3225 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3226 		    NULL, xname, "Carrier Extension Error");
   3227 	}
   3228 
   3229 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3230 	    NULL, xname, "Missed Packets");
   3231 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3232 	    NULL, xname, "Collision");
   3233 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3234 	    NULL, xname, "Sequence Error");
   3235 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3236 	    NULL, xname, "Receive Length Error");
   3237 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3238 	    NULL, xname, "Single Collision");
   3239 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3240 	    NULL, xname, "Excessive Collisions");
   3241 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3242 	    NULL, xname, "Multiple Collision");
   3243 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3244 	    NULL, xname, "Late Collisions");
   3245 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3246 	    NULL, xname, "Defer");
   3247 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3248 	    NULL, xname, "Good Packets Rx");
   3249 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3250 	    NULL, xname, "Broadcast Packets Rx");
   3251 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3252 	    NULL, xname, "Multicast Packets Rx");
   3253 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3254 	    NULL, xname, "Good Packets Tx");
   3255 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3256 	    NULL, xname, "Good Octets Rx");
   3257 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3258 	    NULL, xname, "Good Octets Tx");
   3259 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3260 	    NULL, xname, "Rx No Buffers");
   3261 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3262 	    NULL, xname, "Rx Undersize");
   3263 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3264 	    NULL, xname, "Rx Fragment");
   3265 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3266 	    NULL, xname, "Rx Oversize");
   3267 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3268 	    NULL, xname, "Rx Jabber");
   3269 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3270 	    NULL, xname, "Total Octets Rx");
   3271 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3272 	    NULL, xname, "Total Octets Tx");
   3273 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3274 	    NULL, xname, "Total Packets Rx");
   3275 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3276 	    NULL, xname, "Total Packets Tx");
   3277 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3278 	    NULL, xname, "Multicast Packets Tx");
   3279 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3280 	    NULL, xname, "Broadcast Packets Tx Count");
   3281 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3282 	    NULL, xname, "Packets Rx (64 bytes)");
   3283 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3284 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3285 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3286 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3287 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3288 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3289 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3290 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3291 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3292 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3293 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3294 	    NULL, xname, "Packets Tx (64 bytes)");
   3295 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3296 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3297 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3298 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3299 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3300 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3301 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3302 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3303 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3304 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3305 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3306 	    NULL, xname, "Interrupt Assertion");
   3307 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3308 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3309 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3310 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3311 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3312 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3313 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3314 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3315 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3316 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3317 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3318 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3319 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3320 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3321 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3322 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3323 	if (sc->sc_type >= WM_T_82543) {
   3324 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3325 		    NULL, xname, "Tx with No CRS");
   3326 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3327 		    NULL, xname, "TCP Segmentation Context Tx");
   3328 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3329 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3330 	}
   3331 	if (sc->sc_type >= WM_T_82540) {
   3332 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3333 		    NULL, xname, "Management Packets RX");
   3334 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3335 		    NULL, xname, "Management Packets Dropped");
   3336 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3337 		    NULL, xname, "Management Packets TX");
   3338 	}
   3339 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3340 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3341 		    NULL, xname, "BMC2OS Packets received by host");
   3342 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3343 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3344 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3345 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3346 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3347 		    NULL, xname, "OS2BMC Packets received by BMC");
   3348 	}
   3349 #endif /* WM_EVENT_COUNTERS */
   3350 
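         	/* Default to softint Tx/Rx processing rather than a workqueue. */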
   3351 	sc->sc_txrx_use_workqueue = false;
   3352 
   3353 	if (wm_phy_need_linkdown_discard(sc)) {
   3354 		DPRINTF(sc, WM_DEBUG_LINK,
   3355 		    ("%s: %s: Set linkdown discard flag\n",
   3356 			device_xname(sc->sc_dev), __func__));
   3357 		wm_set_linkdown_discard(sc);
   3358 	}
   3359 
   3360 	wm_init_sysctls(sc);
   3361 
   3362 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3363 		pmf_class_network_register(self, ifp);
   3364 	else
   3365 		aprint_error_dev(self, "couldn't establish power handler\n");
   3366 
   3367 	sc->sc_flags |= WM_F_ATTACHED;
   3368 out:
   3369 	return;
   3370 }
   3371 
   3372 /* The detach function (ca_detach) */
   3373 static int
   3374 wm_detach(device_t self, int flags __unused)
   3375 {
   3376 	struct wm_softc *sc = device_private(self);
   3377 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3378 	int i;
   3379 
   3380 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3381 		return 0;
   3382 
   3383 	/* Stop the interface. Callouts are stopped in it. */
   3384 	IFNET_LOCK(ifp);
   3385 	sc->sc_dying = true;
   3386 	wm_stop(ifp, 1);
   3387 	IFNET_UNLOCK(ifp);
   3388 
   3389 	pmf_device_deregister(self);
   3390 
   3391 	sysctl_teardown(&sc->sc_sysctllog);
   3392 
   3393 #ifdef WM_EVENT_COUNTERS
   3394 	evcnt_detach(&sc->sc_ev_linkintr);
   3395 
   3396 	if (sc->sc_type >= WM_T_82542_2_1) {
   3397 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3398 		evcnt_detach(&sc->sc_ev_tx_xon);
   3399 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3400 		evcnt_detach(&sc->sc_ev_rx_xon);
   3401 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3402 	}
   3403 
   3404 	evcnt_detach(&sc->sc_ev_crcerrs);
   3405 	evcnt_detach(&sc->sc_ev_symerrc);
   3406 
   3407 	if (sc->sc_type >= WM_T_82543) {
   3408 		evcnt_detach(&sc->sc_ev_algnerrc);
   3409 		evcnt_detach(&sc->sc_ev_rxerrc);
   3410 		evcnt_detach(&sc->sc_ev_cexterr);
   3411 	}
   3412 	evcnt_detach(&sc->sc_ev_mpc);
   3413 	evcnt_detach(&sc->sc_ev_colc);
   3414 	evcnt_detach(&sc->sc_ev_sec);
   3415 	evcnt_detach(&sc->sc_ev_rlec);
   3416 	evcnt_detach(&sc->sc_ev_scc);
   3417 	evcnt_detach(&sc->sc_ev_ecol);
   3418 	evcnt_detach(&sc->sc_ev_mcc);
   3419 	evcnt_detach(&sc->sc_ev_latecol);
   3420 	evcnt_detach(&sc->sc_ev_dc);
   3421 	evcnt_detach(&sc->sc_ev_gprc);
   3422 	evcnt_detach(&sc->sc_ev_bprc);
   3423 	evcnt_detach(&sc->sc_ev_mprc);
   3424 	evcnt_detach(&sc->sc_ev_gptc);
   3425 	evcnt_detach(&sc->sc_ev_gorc);
   3426 	evcnt_detach(&sc->sc_ev_gotc);
   3427 	evcnt_detach(&sc->sc_ev_rnbc);
   3428 	evcnt_detach(&sc->sc_ev_ruc);
   3429 	evcnt_detach(&sc->sc_ev_rfc);
   3430 	evcnt_detach(&sc->sc_ev_roc);
   3431 	evcnt_detach(&sc->sc_ev_rjc);
   3432 	evcnt_detach(&sc->sc_ev_tor);
   3433 	evcnt_detach(&sc->sc_ev_tot);
   3434 	evcnt_detach(&sc->sc_ev_tpr);
   3435 	evcnt_detach(&sc->sc_ev_tpt);
   3436 	evcnt_detach(&sc->sc_ev_mptc);
   3437 	evcnt_detach(&sc->sc_ev_bptc);
   3438 	evcnt_detach(&sc->sc_ev_prc64);
   3439 	evcnt_detach(&sc->sc_ev_prc127);
   3440 	evcnt_detach(&sc->sc_ev_prc255);
   3441 	evcnt_detach(&sc->sc_ev_prc511);
   3442 	evcnt_detach(&sc->sc_ev_prc1023);
   3443 	evcnt_detach(&sc->sc_ev_prc1522);
   3444 	evcnt_detach(&sc->sc_ev_ptc64);
   3445 	evcnt_detach(&sc->sc_ev_ptc127);
   3446 	evcnt_detach(&sc->sc_ev_ptc255);
   3447 	evcnt_detach(&sc->sc_ev_ptc511);
   3448 	evcnt_detach(&sc->sc_ev_ptc1023);
   3449 	evcnt_detach(&sc->sc_ev_ptc1522);
   3450 	evcnt_detach(&sc->sc_ev_iac);
   3451 	evcnt_detach(&sc->sc_ev_icrxptc);
   3452 	evcnt_detach(&sc->sc_ev_icrxatc);
   3453 	evcnt_detach(&sc->sc_ev_ictxptc);
   3454 	evcnt_detach(&sc->sc_ev_ictxact);
   3455 	evcnt_detach(&sc->sc_ev_ictxqec);
   3456 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3457 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3458 	evcnt_detach(&sc->sc_ev_icrxoc);
   3459 	if (sc->sc_type >= WM_T_82543) {
   3460 		evcnt_detach(&sc->sc_ev_tncrs);
   3461 		evcnt_detach(&sc->sc_ev_tsctc);
   3462 		evcnt_detach(&sc->sc_ev_tsctfc);
   3463 	}
   3464 	if (sc->sc_type >= WM_T_82540) {
   3465 		evcnt_detach(&sc->sc_ev_mgtprc);
   3466 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3467 		evcnt_detach(&sc->sc_ev_mgtptc);
   3468 	}
   3469 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3470 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3471 		evcnt_detach(&sc->sc_ev_o2bspc);
   3472 		evcnt_detach(&sc->sc_ev_b2ospc);
   3473 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3474 	}
   3475 #endif /* WM_EVENT_COUNTERS */
   3476 
   3477 	rnd_detach_source(&sc->rnd_source);
   3478 
   3479 	/* Tell the firmware about the release */
   3480 	mutex_enter(sc->sc_core_lock);
   3481 	wm_release_manageability(sc);
   3482 	wm_release_hw_control(sc);
   3483 	wm_enable_wakeup(sc);
   3484 	mutex_exit(sc->sc_core_lock);
   3485 
   3486 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3487 
   3488 	ether_ifdetach(ifp);
   3489 	if_detach(ifp);
   3490 	if_percpuq_destroy(sc->sc_ipq);
   3491 
   3492 	/* Delete all remaining media. */
   3493 	ifmedia_fini(&sc->sc_mii.mii_media);
   3494 
   3495 	/* Unload RX dmamaps and free mbufs */
   3496 	for (i = 0; i < sc->sc_nqueues; i++) {
   3497 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3498 		mutex_enter(rxq->rxq_lock);
   3499 		wm_rxdrain(rxq);
   3500 		mutex_exit(rxq->rxq_lock);
   3501 	}
   3502 	/* Must unlock here */
   3503 
   3504 	/* Disestablish the interrupt handler */
   3505 	for (i = 0; i < sc->sc_nintrs; i++) {
   3506 		if (sc->sc_ihs[i] != NULL) {
   3507 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3508 			sc->sc_ihs[i] = NULL;
   3509 		}
   3510 	}
   3511 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3512 
   3513 	/* wm_stop() ensured that the workqueues are stopped. */
   3514 	workqueue_destroy(sc->sc_queue_wq);
   3515 	workqueue_destroy(sc->sc_reset_wq);
   3516 
   3517 	for (i = 0; i < sc->sc_nqueues; i++)
   3518 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3519 
   3520 	wm_free_txrx_queues(sc);
   3521 
   3522 	/* Unmap the registers */
   3523 	if (sc->sc_ss) {
   3524 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3525 		sc->sc_ss = 0;
   3526 	}
   3527 	if (sc->sc_ios) {
   3528 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3529 		sc->sc_ios = 0;
   3530 	}
   3531 	if (sc->sc_flashs) {
   3532 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3533 		sc->sc_flashs = 0;
   3534 	}
   3535 
   3536 	if (sc->sc_core_lock)
   3537 		mutex_obj_free(sc->sc_core_lock);
   3538 	if (sc->sc_ich_phymtx)
   3539 		mutex_obj_free(sc->sc_ich_phymtx);
   3540 	if (sc->sc_ich_nvmmtx)
   3541 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3542 
   3543 	return 0;
   3544 }
   3545 
   3546 static bool
   3547 wm_suspend(device_t self, const pmf_qual_t *qual)
   3548 {
   3549 	struct wm_softc *sc = device_private(self);
   3550 
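         	/*
         	 * Hand the hardware back to the firmware and arm the
         	 * configured wake events before the device loses power.
         	 */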
   3551 	wm_release_manageability(sc);
   3552 	wm_release_hw_control(sc);
   3553 	wm_enable_wakeup(sc);
   3554 
   3555 	return true;
   3556 }
   3557 
   3558 static bool
   3559 wm_resume(device_t self, const pmf_qual_t *qual)
   3560 {
   3561 	struct wm_softc *sc = device_private(self);
   3562 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3563 	pcireg_t reg;
   3564 	char buf[256];
   3565 
   3566 	reg = CSR_READ(sc, WMREG_WUS);
   3567 	if (reg != 0) {
   3568 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3569 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3570 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3571 	}
   3572 
   3573 	if (sc->sc_type >= WM_T_PCH2)
   3574 		wm_resume_workarounds_pchlan(sc);
   3575 	IFNET_LOCK(ifp);
   3576 	if ((ifp->if_flags & IFF_UP) == 0) {
   3577 		/* >= PCH_SPT hardware workaround before reset. */
   3578 		if (sc->sc_type >= WM_T_PCH_SPT)
   3579 			wm_flush_desc_rings(sc);
   3580 
   3581 		wm_reset(sc);
   3582 		/* Non-AMT based hardware can now take control from firmware */
   3583 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3584 			wm_get_hw_control(sc);
   3585 		wm_init_manageability(sc);
   3586 	} else {
   3587 		/*
   3588 		 * We called pmf_class_network_register(), so if_init() is
   3589 		 * automatically called when IFF_UP. wm_reset(),
   3590 		 * wm_get_hw_control() and wm_init_manageability() are called
   3591 		 * via wm_init().
   3592 		 */
   3593 	}
   3594 	IFNET_UNLOCK(ifp);
   3595 
   3596 	return true;
   3597 }
   3598 
   3599 /*
   3600  * wm_watchdog:
   3601  *
   3602  *	Watchdog checker.
   3603  */
   3604 static bool
   3605 wm_watchdog(struct ifnet *ifp)
   3606 {
   3607 	int qid;
   3608 	struct wm_softc *sc = ifp->if_softc;
    3609 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576). */
   3610 
   3611 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3612 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3613 
   3614 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3615 	}
   3616 
   3617 #ifdef WM_DEBUG
   3618 	if (sc->sc_trigger_reset) {
   3619 		/* debug operation, no need for atomicity or reliability */
   3620 		sc->sc_trigger_reset = 0;
   3621 		hang_queue++;
   3622 	}
   3623 #endif
   3624 
   3625 	if (hang_queue == 0)
   3626 		return true;
   3627 
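         	/*
         	 * Atomically claim the pending flag so that at most one reset
         	 * work item is queued at a time.
         	 */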
   3628 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3629 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3630 
   3631 	return false;
   3632 }
   3633 
   3634 /*
   3635  * Perform an interface watchdog reset.
   3636  */
   3637 static void
   3638 wm_handle_reset_work(struct work *work, void *arg)
   3639 {
   3640 	struct wm_softc * const sc = arg;
   3641 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3642 
   3643 	/* Don't want ioctl operations to happen */
   3644 	IFNET_LOCK(ifp);
   3645 
    3646 	/* Reset the interface. */
   3647 	wm_init(ifp);
   3648 
   3649 	IFNET_UNLOCK(ifp);
   3650 
   3651 	/*
    3652 	 * Some upper-layer processing, e.g. ALTQ or single-CPU systems,
    3653 	 * still calls ifp->if_start() directly.
   3654 	 */
   3655 	/* Try to get more packets going. */
   3656 	ifp->if_start(ifp);
   3657 
   3658 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3659 }
   3660 
   3661 
   3662 static void
   3663 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3664 {
   3665 
   3666 	mutex_enter(txq->txq_lock);
   3667 	if (txq->txq_sending &&
   3668 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3669 		wm_watchdog_txq_locked(ifp, txq, hang);
   3670 
   3671 	mutex_exit(txq->txq_lock);
   3672 }
   3673 
   3674 static void
   3675 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3676     uint16_t *hang)
   3677 {
   3678 	struct wm_softc *sc = ifp->if_softc;
   3679 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3680 
   3681 	KASSERT(mutex_owned(txq->txq_lock));
   3682 
   3683 	/*
   3684 	 * Since we're using delayed interrupts, sweep up
   3685 	 * before we report an error.
   3686 	 */
   3687 	wm_txeof(txq, UINT_MAX);
   3688 
   3689 	if (txq->txq_sending)
   3690 		*hang |= __BIT(wmq->wmq_id);
   3691 
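         	/*
         	 * If every descriptor is free after the sweep above, the
         	 * timeout was caused by a lost completion interrupt.
         	 */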
   3692 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3693 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3694 		    device_xname(sc->sc_dev));
   3695 	} else {
   3696 #ifdef WM_DEBUG
   3697 		int i, j;
   3698 		struct wm_txsoft *txs;
   3699 #endif
   3700 		log(LOG_ERR,
   3701 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3702 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3703 		    txq->txq_next);
   3704 		if_statinc(ifp, if_oerrors);
   3705 #ifdef WM_DEBUG
   3706 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3707 		     i = WM_NEXTTXS(txq, i)) {
   3708 			txs = &txq->txq_soft[i];
   3709 			printf("txs %d tx %d -> %d\n",
   3710 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3711 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3712 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3713 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3714 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3715 					printf("\t %#08x%08x\n",
   3716 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3717 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3718 				} else {
   3719 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3720 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3721 					    txq->txq_descs[j].wtx_addr.wa_low);
   3722 					printf("\t %#04x%02x%02x%08x\n",
   3723 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3724 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3725 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3726 					    txq->txq_descs[j].wtx_cmdlen);
   3727 				}
   3728 				if (j == txs->txs_lastdesc)
   3729 					break;
   3730 			}
   3731 		}
   3732 #endif
   3733 	}
   3734 }
   3735 
   3736 /*
   3737  * wm_tick:
   3738  *
   3739  *	One second timer, used to check link status, sweep up
   3740  *	completed transmit jobs, etc.
   3741  */
   3742 static void
   3743 wm_tick(void *arg)
   3744 {
   3745 	struct wm_softc *sc = arg;
   3746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3747 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3748 	    cexterr;
   3749 
   3750 	mutex_enter(sc->sc_core_lock);
   3751 
   3752 	if (sc->sc_core_stopping) {
   3753 		mutex_exit(sc->sc_core_lock);
   3754 		return;
   3755 	}
   3756 
   3757 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3758 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3759 	mpc = CSR_READ(sc, WMREG_MPC);
   3760 	colc = CSR_READ(sc, WMREG_COLC);
   3761 	sec = CSR_READ(sc, WMREG_SEC);
   3762 	rlec = CSR_READ(sc, WMREG_RLEC);
   3763 
   3764 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3765 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3766 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3767 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3768 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3769 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3770 
   3771 	if (sc->sc_type >= WM_T_82542_2_1) {
   3772 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3773 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3774 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3775 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3776 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3777 	}
   3778 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3779 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3780 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3781 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3782 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3783 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3784 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3785 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3786 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3787 
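         	/* The good-octet counters are 64 bits, split across L/H pairs. */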
   3788 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3789 	    CSR_READ(sc, WMREG_GORCL) +
   3790 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   3791 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3792 	    CSR_READ(sc, WMREG_GOTCL) +
   3793 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3794 
   3795 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3797 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3798 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3799 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3800 
   3801 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3802 	    CSR_READ(sc, WMREG_TORL) +
   3803 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   3804 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3805 	    CSR_READ(sc, WMREG_TOTL) +
   3806 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3807 
   3808 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3810 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3811 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3812 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3813 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3814 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3815 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3816 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3817 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3818 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3819 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3820 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3821 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3822 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3823 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3824 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3825 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3826 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3827 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3828 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3829 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3830 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3831 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3832 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3833 
   3834 	if (sc->sc_type >= WM_T_82543) {
   3835 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3836 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3837 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3838 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3839 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3840 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3841 
   3842 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3843 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3844 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3845 	} else
   3846 		algnerrc = rxerrc = cexterr = 0;
   3847 
   3848 	if (sc->sc_type >= WM_T_82540) {
   3849 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3850 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3851 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3852 	}
   3853 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3854 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3855 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3856 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3857 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3858 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3859 	}
   3860 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3861 	if_statadd_ref(nsr, if_collisions, colc);
   3862 	if_statadd_ref(nsr, if_ierrors,
   3863 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3864 	/*
    3865 	 * WMREG_RNBC is incremented when no receive buffer is available in
    3866 	 * host memory. It is not the number of dropped packets, because the
    3867 	 * controller can still receive packets in that case as long as there
    3868 	 * is space in the PHY's FIFO.
    3869 	 *
    3870 	 * To track WMREG_RNBC itself, use a dedicated EVCNT instead of
    3871 	 * if_iqdrops.
   3872 	 */
   3873 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3874 	IF_STAT_PUTREF(ifp);
   3875 
   3876 	if (sc->sc_flags & WM_F_HAS_MII)
   3877 		mii_tick(&sc->sc_mii);
   3878 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3879 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3880 		wm_serdes_tick(sc);
   3881 	else
   3882 		wm_tbi_tick(sc);
   3883 
   3884 	mutex_exit(sc->sc_core_lock);
   3885 
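         	/* Don't reschedule the tick if the watchdog queued a reset. */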
   3886 	if (wm_watchdog(ifp))
   3887 		callout_schedule(&sc->sc_tick_ch, hz);
   3888 }
   3889 
   3890 static int
   3891 wm_ifflags_cb(struct ethercom *ec)
   3892 {
   3893 	struct ifnet *ifp = &ec->ec_if;
   3894 	struct wm_softc *sc = ifp->if_softc;
   3895 	u_short iffchange;
   3896 	int ecchange;
   3897 	bool needreset = false;
   3898 	int rc = 0;
   3899 
   3900 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3901 		device_xname(sc->sc_dev), __func__));
   3902 
   3903 	KASSERT(IFNET_LOCKED(ifp));
   3904 
   3905 	mutex_enter(sc->sc_core_lock);
   3906 
   3907 	/*
   3908 	 * Check for if_flags.
   3909 	 * Main usage is to prevent linkdown when opening bpf.
   3910 	 */
   3911 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3912 	sc->sc_if_flags = ifp->if_flags;
   3913 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3914 		needreset = true;
   3915 		goto ec;
   3916 	}
   3917 
   3918 	/* iff related updates */
   3919 	if ((iffchange & IFF_PROMISC) != 0)
   3920 		wm_set_filter(sc);
   3921 
   3922 	wm_set_vlan(sc);
   3923 
   3924 ec:
   3925 	/* Check for ec_capenable. */
   3926 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3927 	sc->sc_ec_capenable = ec->ec_capenable;
   3928 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3929 		needreset = true;
   3930 		goto out;
   3931 	}
   3932 
   3933 	/* ec related updates */
   3934 	wm_set_eee(sc);
   3935 
   3936 out:
   3937 	if (needreset)
   3938 		rc = ENETRESET;
   3939 	mutex_exit(sc->sc_core_lock);
   3940 
   3941 	return rc;
   3942 }
   3943 
   3944 static bool
   3945 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3946 {
   3947 
   3948 	switch (sc->sc_phytype) {
   3949 	case WMPHY_82577: /* ihphy */
   3950 	case WMPHY_82578: /* atphy */
   3951 	case WMPHY_82579: /* ihphy */
   3952 	case WMPHY_I217: /* ihphy */
   3953 	case WMPHY_82580: /* ihphy */
   3954 	case WMPHY_I350: /* ihphy */
   3955 		return true;
   3956 	default:
   3957 		return false;
   3958 	}
   3959 }
   3960 
   3961 static void
   3962 wm_set_linkdown_discard(struct wm_softc *sc)
   3963 {
   3964 
   3965 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3966 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3967 
   3968 		mutex_enter(txq->txq_lock);
   3969 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3970 		mutex_exit(txq->txq_lock);
   3971 	}
   3972 }
   3973 
   3974 static void
   3975 wm_clear_linkdown_discard(struct wm_softc *sc)
   3976 {
   3977 
   3978 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3979 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3980 
   3981 		mutex_enter(txq->txq_lock);
   3982 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3983 		mutex_exit(txq->txq_lock);
   3984 	}
   3985 }
   3986 
   3987 /*
   3988  * wm_ioctl:		[ifnet interface function]
   3989  *
   3990  *	Handle control requests from the operator.
   3991  */
   3992 static int
   3993 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3994 {
   3995 	struct wm_softc *sc = ifp->if_softc;
   3996 	struct ifreq *ifr = (struct ifreq *)data;
   3997 	struct ifaddr *ifa = (struct ifaddr *)data;
   3998 	struct sockaddr_dl *sdl;
   3999 	int error;
   4000 
   4001 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4002 		device_xname(sc->sc_dev), __func__));
   4003 
   4004 	switch (cmd) {
   4005 	case SIOCADDMULTI:
   4006 	case SIOCDELMULTI:
   4007 		break;
   4008 	default:
   4009 		KASSERT(IFNET_LOCKED(ifp));
   4010 	}
   4011 
   4012 	switch (cmd) {
   4013 	case SIOCSIFMEDIA:
   4014 		mutex_enter(sc->sc_core_lock);
   4015 		/* Flow control requires full-duplex mode. */
   4016 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4017 		    (ifr->ifr_media & IFM_FDX) == 0)
   4018 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4019 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4020 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4021 				/* We can do both TXPAUSE and RXPAUSE. */
   4022 				ifr->ifr_media |=
   4023 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4024 			}
   4025 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4026 		}
   4027 		mutex_exit(sc->sc_core_lock);
   4028 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4029 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4030 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4031 				DPRINTF(sc, WM_DEBUG_LINK,
   4032 				    ("%s: %s: Set linkdown discard flag\n",
   4033 					device_xname(sc->sc_dev), __func__));
   4034 				wm_set_linkdown_discard(sc);
   4035 			}
   4036 		}
   4037 		break;
   4038 	case SIOCINITIFADDR:
   4039 		mutex_enter(sc->sc_core_lock);
   4040 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4041 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4042 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4043 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4044 			/* Unicast address is the first multicast entry */
   4045 			wm_set_filter(sc);
   4046 			error = 0;
   4047 			mutex_exit(sc->sc_core_lock);
   4048 			break;
   4049 		}
   4050 		mutex_exit(sc->sc_core_lock);
   4051 		/*FALLTHROUGH*/
   4052 	default:
   4053 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4054 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4055 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4056 				DPRINTF(sc, WM_DEBUG_LINK,
   4057 				    ("%s: %s: Set linkdown discard flag\n",
   4058 					device_xname(sc->sc_dev), __func__));
   4059 				wm_set_linkdown_discard(sc);
   4060 			}
   4061 		}
   4062 		const int s = splnet();
   4063 		/* It may call wm_start, so unlock here */
   4064 		error = ether_ioctl(ifp, cmd, data);
   4065 		splx(s);
   4066 		if (error != ENETRESET)
   4067 			break;
   4068 
   4069 		error = 0;
   4070 
   4071 		if (cmd == SIOCSIFCAP)
   4072 			error = if_init(ifp);
   4073 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4074 			mutex_enter(sc->sc_core_lock);
   4075 			if (sc->sc_if_flags & IFF_RUNNING) {
   4076 				/*
   4077 				 * Multicast list has changed; set the
   4078 				 * hardware filter accordingly.
   4079 				 */
   4080 				wm_set_filter(sc);
   4081 			}
   4082 			mutex_exit(sc->sc_core_lock);
   4083 		}
   4084 		break;
   4085 	}
   4086 
   4087 	return error;
   4088 }
   4089 
   4090 /* MAC address related */
   4091 
   4092 /*
    4093  * Get the offset of the MAC address and return it.
    4094  * If an error occurs, use offset 0.
   4095  */
   4096 static uint16_t
   4097 wm_check_alt_mac_addr(struct wm_softc *sc)
   4098 {
   4099 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4100 	uint16_t offset = NVM_OFF_MACADDR;
   4101 
   4102 	/* Try to read alternative MAC address pointer */
   4103 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4104 		return 0;
   4105 
    4106 	/* Check whether the pointer is valid. */
   4107 	if ((offset == 0x0000) || (offset == 0xffff))
   4108 		return 0;
   4109 
   4110 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4111 	/*
    4112 	 * Check whether the alternative MAC address is valid or not.
    4113 	 * Some cards have a non-0xffff pointer but don't actually use
    4114 	 * an alternative MAC address.
   4115 	 *
   4116 	 * Check whether the broadcast bit is set or not.
   4117 	 */
   4118 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4119 		if (((myea[0] & 0xff) & 0x01) == 0)
   4120 			return offset; /* Found */
   4121 
   4122 	/* Not found */
   4123 	return 0;
   4124 }
   4125 
   4126 static int
   4127 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4128 {
   4129 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4130 	uint16_t offset = NVM_OFF_MACADDR;
   4131 	int do_invert = 0;
   4132 
   4133 	switch (sc->sc_type) {
   4134 	case WM_T_82580:
   4135 	case WM_T_I350:
   4136 	case WM_T_I354:
   4137 		/* EEPROM Top Level Partitioning */
   4138 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4139 		break;
   4140 	case WM_T_82571:
   4141 	case WM_T_82575:
   4142 	case WM_T_82576:
   4143 	case WM_T_80003:
   4144 	case WM_T_I210:
   4145 	case WM_T_I211:
   4146 		offset = wm_check_alt_mac_addr(sc);
   4147 		if (offset == 0)
   4148 			if ((sc->sc_funcid & 0x01) == 1)
   4149 				do_invert = 1;
   4150 		break;
   4151 	default:
   4152 		if ((sc->sc_funcid & 0x01) == 1)
   4153 			do_invert = 1;
   4154 		break;
   4155 	}
   4156 
   4157 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4158 		goto bad;
   4159 
   4160 	enaddr[0] = myea[0] & 0xff;
   4161 	enaddr[1] = myea[0] >> 8;
   4162 	enaddr[2] = myea[1] & 0xff;
   4163 	enaddr[3] = myea[1] >> 8;
   4164 	enaddr[4] = myea[2] & 0xff;
   4165 	enaddr[5] = myea[2] >> 8;
   4166 
   4167 	/*
   4168 	 * Toggle the LSB of the MAC address on the second port
   4169 	 * of some dual port cards.
   4170 	 */
   4171 	if (do_invert != 0)
   4172 		enaddr[5] ^= 1;
   4173 
   4174 	return 0;
   4175 
   4176 bad:
   4177 	return -1;
   4178 }
   4179 
   4180 /*
   4181  * wm_set_ral:
   4182  *
    4183  *	Set an entry in the receive address list.
   4184  */
   4185 static void
   4186 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4187 {
   4188 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4189 	uint32_t wlock_mac;
   4190 	int rv;
   4191 
   4192 	if (enaddr != NULL) {
   4193 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4194 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4195 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4196 		ral_hi |= RAL_AV;
   4197 	} else {
   4198 		ral_lo = 0;
   4199 		ral_hi = 0;
   4200 	}
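         	/*
         	 * For example, 00:11:22:33:44:55 packs as ral_lo 0x33221100
         	 * and ral_hi 0x00005544 | RAL_AV.
         	 */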
   4201 
   4202 	switch (sc->sc_type) {
   4203 	case WM_T_82542_2_0:
   4204 	case WM_T_82542_2_1:
   4205 	case WM_T_82543:
   4206 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4207 		CSR_WRITE_FLUSH(sc);
   4208 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4209 		CSR_WRITE_FLUSH(sc);
   4210 		break;
   4211 	case WM_T_PCH2:
   4212 	case WM_T_PCH_LPT:
   4213 	case WM_T_PCH_SPT:
   4214 	case WM_T_PCH_CNP:
   4215 		if (idx == 0) {
   4216 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4217 			CSR_WRITE_FLUSH(sc);
   4218 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4219 			CSR_WRITE_FLUSH(sc);
   4220 			return;
   4221 		}
   4222 		if (sc->sc_type != WM_T_PCH2) {
   4223 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4224 			    FWSM_WLOCK_MAC);
   4225 			addrl = WMREG_SHRAL(idx - 1);
   4226 			addrh = WMREG_SHRAH(idx - 1);
   4227 		} else {
   4228 			wlock_mac = 0;
   4229 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4230 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4231 		}
   4232 
   4233 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4234 			rv = wm_get_swflag_ich8lan(sc);
   4235 			if (rv != 0)
   4236 				return;
   4237 			CSR_WRITE(sc, addrl, ral_lo);
   4238 			CSR_WRITE_FLUSH(sc);
   4239 			CSR_WRITE(sc, addrh, ral_hi);
   4240 			CSR_WRITE_FLUSH(sc);
   4241 			wm_put_swflag_ich8lan(sc);
   4242 		}
   4243 
   4244 		break;
   4245 	default:
   4246 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4247 		CSR_WRITE_FLUSH(sc);
   4248 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4249 		CSR_WRITE_FLUSH(sc);
   4250 		break;
   4251 	}
   4252 }
   4253 
   4254 /*
   4255  * wm_mchash:
   4256  *
    4257  *	Compute the hash of the multicast address for the 4096-bit
    4258  *	multicast filter (1024-bit on ICH/PCH variants).
   4259  */
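         /*
          * For example, with sc_mchash_type 0 on non-ICH parts, the address
          * 01:00:5e:00:00:fb hashes to (0x00 >> 4) | (0xfb << 4) = 0xfb0,
          * for which wm_set_filter() sets bit 16 of MTA[0x7d].
          */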
   4260 static uint32_t
   4261 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4262 {
   4263 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4264 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4265 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4266 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4267 	uint32_t hash;
   4268 
   4269 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4270 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4271 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4272 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4273 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4274 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4275 		return (hash & 0x3ff);
   4276 	}
   4277 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4278 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4279 
   4280 	return (hash & 0xfff);
   4281 }
   4282 
    4283 /*
    4284  * wm_rar_count:
    4285  *	Return the number of receive address list entries.
    4286  */
   4287 static int
   4288 wm_rar_count(struct wm_softc *sc)
   4289 {
   4290 	int size;
   4291 
   4292 	switch (sc->sc_type) {
   4293 	case WM_T_ICH8:
    4294 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4295 		break;
   4296 	case WM_T_ICH9:
   4297 	case WM_T_ICH10:
   4298 	case WM_T_PCH:
   4299 		size = WM_RAL_TABSIZE_ICH8;
   4300 		break;
   4301 	case WM_T_PCH2:
   4302 		size = WM_RAL_TABSIZE_PCH2;
   4303 		break;
   4304 	case WM_T_PCH_LPT:
   4305 	case WM_T_PCH_SPT:
   4306 	case WM_T_PCH_CNP:
   4307 		size = WM_RAL_TABSIZE_PCH_LPT;
   4308 		break;
   4309 	case WM_T_82575:
   4310 	case WM_T_I210:
   4311 	case WM_T_I211:
   4312 		size = WM_RAL_TABSIZE_82575;
   4313 		break;
   4314 	case WM_T_82576:
   4315 	case WM_T_82580:
   4316 		size = WM_RAL_TABSIZE_82576;
   4317 		break;
   4318 	case WM_T_I350:
   4319 	case WM_T_I354:
   4320 		size = WM_RAL_TABSIZE_I350;
   4321 		break;
   4322 	default:
   4323 		size = WM_RAL_TABSIZE;
   4324 	}
   4325 
   4326 	return size;
   4327 }
   4328 
   4329 /*
   4330  * wm_set_filter:
   4331  *
   4332  *	Set up the receive filter.
   4333  */
   4334 static void
   4335 wm_set_filter(struct wm_softc *sc)
   4336 {
   4337 	struct ethercom *ec = &sc->sc_ethercom;
   4338 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4339 	struct ether_multi *enm;
   4340 	struct ether_multistep step;
   4341 	bus_addr_t mta_reg;
   4342 	uint32_t hash, reg, bit;
   4343 	int i, size, ralmax, rv;
   4344 
   4345 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4346 		device_xname(sc->sc_dev), __func__));
   4347 	KASSERT(mutex_owned(sc->sc_core_lock));
   4348 
   4349 	if (sc->sc_type >= WM_T_82544)
   4350 		mta_reg = WMREG_CORDOVA_MTA;
   4351 	else
   4352 		mta_reg = WMREG_MTA;
   4353 
   4354 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4355 
   4356 	if (sc->sc_if_flags & IFF_BROADCAST)
   4357 		sc->sc_rctl |= RCTL_BAM;
   4358 	if (sc->sc_if_flags & IFF_PROMISC) {
   4359 		sc->sc_rctl |= RCTL_UPE;
   4360 		ETHER_LOCK(ec);
   4361 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4362 		ETHER_UNLOCK(ec);
   4363 		goto allmulti;
   4364 	}
   4365 
   4366 	/*
   4367 	 * Set the station address in the first RAL slot, and
   4368 	 * clear the remaining slots.
   4369 	 */
   4370 	size = wm_rar_count(sc);
   4371 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4372 
   4373 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4374 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4375 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4376 		switch (i) {
   4377 		case 0:
   4378 			/* We can use all entries */
   4379 			ralmax = size;
   4380 			break;
   4381 		case 1:
   4382 			/* Only RAR[0] */
   4383 			ralmax = 1;
   4384 			break;
   4385 		default:
   4386 			/* Available SHRA + RAR[0] */
   4387 			ralmax = i + 1;
   4388 		}
   4389 	} else
   4390 		ralmax = size;
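         	/*
         	 * Don't touch entries at or above ralmax: WLOCK_MAC keeps
         	 * software from writing them (see wm_set_ral()).
         	 */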
   4391 	for (i = 1; i < size; i++) {
   4392 		if (i < ralmax)
   4393 			wm_set_ral(sc, NULL, i);
   4394 	}
   4395 
   4396 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4397 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4398 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4399 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4400 		size = WM_ICH8_MC_TABSIZE;
   4401 	else
   4402 		size = WM_MC_TABSIZE;
   4403 	/* Clear out the multicast table. */
   4404 	for (i = 0; i < size; i++) {
   4405 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4406 		CSR_WRITE_FLUSH(sc);
   4407 	}
   4408 
   4409 	ETHER_LOCK(ec);
   4410 	ETHER_FIRST_MULTI(step, ec, enm);
   4411 	while (enm != NULL) {
   4412 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4413 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4414 			ETHER_UNLOCK(ec);
   4415 			/*
   4416 			 * We must listen to a range of multicast addresses.
   4417 			 * For now, just accept all multicasts, rather than
   4418 			 * trying to set only those filter bits needed to match
   4419 			 * the range.  (At this time, the only use of address
   4420 			 * ranges is for IP multicast routing, for which the
   4421 			 * range is big enough to require all bits set.)
   4422 			 */
   4423 			goto allmulti;
   4424 		}
   4425 
   4426 		hash = wm_mchash(sc, enm->enm_addrlo);
   4427 
   4428 		reg = (hash >> 5);
   4429 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4430 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4431 		    || (sc->sc_type == WM_T_PCH2)
   4432 		    || (sc->sc_type == WM_T_PCH_LPT)
   4433 		    || (sc->sc_type == WM_T_PCH_SPT)
   4434 		    || (sc->sc_type == WM_T_PCH_CNP))
   4435 			reg &= 0x1f;
   4436 		else
   4437 			reg &= 0x7f;
   4438 		bit = hash & 0x1f;
   4439 
   4440 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4441 		hash |= 1U << bit;
   4442 
   4443 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4444 			/*
    4445 			 * 82544 Errata 9: Certain registers cannot be written
   4446 			 * with particular alignments in PCI-X bus operation
   4447 			 * (FCAH, MTA and VFTA).
   4448 			 */
   4449 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4450 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4451 			CSR_WRITE_FLUSH(sc);
   4452 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4453 			CSR_WRITE_FLUSH(sc);
   4454 		} else {
   4455 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4456 			CSR_WRITE_FLUSH(sc);
   4457 		}
   4458 
   4459 		ETHER_NEXT_MULTI(step, enm);
   4460 	}
   4461 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4462 	ETHER_UNLOCK(ec);
   4463 
   4464 	goto setit;
   4465 
   4466 allmulti:
   4467 	sc->sc_rctl |= RCTL_MPE;
   4468 
   4469 setit:
   4470 	if (sc->sc_type >= WM_T_PCH2) {
   4471 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4472 		    && (ifp->if_mtu > ETHERMTU))
   4473 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4474 		else
   4475 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4476 		if (rv != 0)
   4477 			device_printf(sc->sc_dev,
   4478 			    "Failed to do workaround for jumbo frame.\n");
   4479 	}
   4480 
   4481 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4482 }
   4483 
   4484 /* Reset and init related */
   4485 
   4486 static void
   4487 wm_set_vlan(struct wm_softc *sc)
   4488 {
   4489 
   4490 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4491 		device_xname(sc->sc_dev), __func__));
   4492 
   4493 	/* Deal with VLAN enables. */
   4494 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4495 		sc->sc_ctrl |= CTRL_VME;
   4496 	else
   4497 		sc->sc_ctrl &= ~CTRL_VME;
   4498 
   4499 	/* Write the control registers. */
   4500 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4501 }
   4502 
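         /*
          * wm_set_pcie_completion_timeout:
          *
          *	If the PCIe completion timeout is still at its hardware
          *	default, set it to 10ms via GCR, or to 16ms via DCSR2 when
          *	capability version 2 is present, then disable completion
          *	timeout resend.
          */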
   4503 static void
   4504 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4505 {
   4506 	uint32_t gcr;
   4507 	pcireg_t ctrl2;
   4508 
   4509 	gcr = CSR_READ(sc, WMREG_GCR);
   4510 
   4511 	/* Only take action if timeout value is defaulted to 0 */
   4512 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4513 		goto out;
   4514 
   4515 	if ((gcr & GCR_CAP_VER2) == 0) {
   4516 		gcr |= GCR_CMPL_TMOUT_10MS;
   4517 		goto out;
   4518 	}
   4519 
   4520 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4521 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4522 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4523 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4524 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4525 
   4526 out:
   4527 	/* Disable completion timeout resend */
   4528 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4529 
   4530 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4531 }
   4532 
   4533 void
   4534 wm_get_auto_rd_done(struct wm_softc *sc)
   4535 {
   4536 	int i;
   4537 
    4538 	/* Wait for eeprom to reload */
   4539 	switch (sc->sc_type) {
   4540 	case WM_T_82571:
   4541 	case WM_T_82572:
   4542 	case WM_T_82573:
   4543 	case WM_T_82574:
   4544 	case WM_T_82583:
   4545 	case WM_T_82575:
   4546 	case WM_T_82576:
   4547 	case WM_T_82580:
   4548 	case WM_T_I350:
   4549 	case WM_T_I354:
   4550 	case WM_T_I210:
   4551 	case WM_T_I211:
   4552 	case WM_T_80003:
   4553 	case WM_T_ICH8:
   4554 	case WM_T_ICH9:
   4555 		for (i = 0; i < 10; i++) {
   4556 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4557 				break;
   4558 			delay(1000);
   4559 		}
   4560 		if (i == 10) {
   4561 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4562 			    "complete\n", device_xname(sc->sc_dev));
   4563 		}
   4564 		break;
   4565 	default:
   4566 		break;
   4567 	}
   4568 }
   4569 
   4570 void
   4571 wm_lan_init_done(struct wm_softc *sc)
   4572 {
   4573 	uint32_t reg = 0;
   4574 	int i;
   4575 
   4576 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4577 		device_xname(sc->sc_dev), __func__));
   4578 
   4579 	/* Wait for eeprom to reload */
   4580 	switch (sc->sc_type) {
   4581 	case WM_T_ICH10:
   4582 	case WM_T_PCH:
   4583 	case WM_T_PCH2:
   4584 	case WM_T_PCH_LPT:
   4585 	case WM_T_PCH_SPT:
   4586 	case WM_T_PCH_CNP:
   4587 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4588 			reg = CSR_READ(sc, WMREG_STATUS);
   4589 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4590 				break;
   4591 			delay(100);
   4592 		}
   4593 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4594 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4595 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4596 		}
   4597 		break;
   4598 	default:
   4599 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4600 		    __func__);
   4601 		break;
   4602 	}
   4603 
   4604 	reg &= ~STATUS_LAN_INIT_DONE;
   4605 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4606 }
   4607 
   4608 void
   4609 wm_get_cfg_done(struct wm_softc *sc)
   4610 {
   4611 	int mask;
   4612 	uint32_t reg;
   4613 	int i;
   4614 
   4615 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4616 		device_xname(sc->sc_dev), __func__));
   4617 
   4618 	/* Wait for eeprom to reload */
   4619 	switch (sc->sc_type) {
   4620 	case WM_T_82542_2_0:
   4621 	case WM_T_82542_2_1:
   4622 		/* null */
   4623 		break;
   4624 	case WM_T_82543:
   4625 	case WM_T_82544:
   4626 	case WM_T_82540:
   4627 	case WM_T_82545:
   4628 	case WM_T_82545_3:
   4629 	case WM_T_82546:
   4630 	case WM_T_82546_3:
   4631 	case WM_T_82541:
   4632 	case WM_T_82541_2:
   4633 	case WM_T_82547:
   4634 	case WM_T_82547_2:
   4635 	case WM_T_82573:
   4636 	case WM_T_82574:
   4637 	case WM_T_82583:
   4638 		/* generic */
   4639 		delay(10*1000);
   4640 		break;
   4641 	case WM_T_80003:
   4642 	case WM_T_82571:
   4643 	case WM_T_82572:
   4644 	case WM_T_82575:
   4645 	case WM_T_82576:
   4646 	case WM_T_82580:
   4647 	case WM_T_I350:
   4648 	case WM_T_I354:
   4649 	case WM_T_I210:
   4650 	case WM_T_I211:
   4651 		if (sc->sc_type == WM_T_82571) {
    4652 			/* 82571 shares CFGDONE_0 between both functions */
   4653 			mask = EEMNGCTL_CFGDONE_0;
   4654 		} else
   4655 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4656 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4657 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4658 				break;
   4659 			delay(1000);
   4660 		}
   4661 		if (i >= WM_PHY_CFG_TIMEOUT)
   4662 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4663 				device_xname(sc->sc_dev), __func__));
   4664 		break;
   4665 	case WM_T_ICH8:
   4666 	case WM_T_ICH9:
   4667 	case WM_T_ICH10:
   4668 	case WM_T_PCH:
   4669 	case WM_T_PCH2:
   4670 	case WM_T_PCH_LPT:
   4671 	case WM_T_PCH_SPT:
   4672 	case WM_T_PCH_CNP:
   4673 		delay(10*1000);
   4674 		if (sc->sc_type >= WM_T_ICH10)
   4675 			wm_lan_init_done(sc);
   4676 		else
   4677 			wm_get_auto_rd_done(sc);
   4678 
   4679 		/* Clear PHY Reset Asserted bit */
   4680 		reg = CSR_READ(sc, WMREG_STATUS);
   4681 		if ((reg & STATUS_PHYRA) != 0)
   4682 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4683 		break;
   4684 	default:
   4685 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4686 		    __func__);
   4687 		break;
   4688 	}
   4689 }
   4690 
   4691 int
   4692 wm_phy_post_reset(struct wm_softc *sc)
   4693 {
   4694 	device_t dev = sc->sc_dev;
   4695 	uint16_t reg;
   4696 	int rv = 0;
   4697 
   4698 	/* This function is only for ICH8 and newer. */
   4699 	if (sc->sc_type < WM_T_ICH8)
   4700 		return 0;
   4701 
   4702 	if (wm_phy_resetisblocked(sc)) {
   4703 		/* XXX */
   4704 		device_printf(dev, "PHY is blocked\n");
   4705 		return -1;
   4706 	}
   4707 
   4708 	/* Allow time for h/w to get to quiescent state after reset */
   4709 	delay(10*1000);
   4710 
   4711 	/* Perform any necessary post-reset workarounds */
   4712 	if (sc->sc_type == WM_T_PCH)
   4713 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4714 	else if (sc->sc_type == WM_T_PCH2)
   4715 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4716 	if (rv != 0)
   4717 		return rv;
   4718 
    4719 	/* Clear the host wakeup bit after LCD reset */
   4720 	if (sc->sc_type >= WM_T_PCH) {
   4721 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4722 		reg &= ~BM_WUC_HOST_WU_BIT;
   4723 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4724 	}
   4725 
   4726 	/* Configure the LCD with the extended configuration region in NVM */
   4727 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4728 		return rv;
   4729 
   4730 	/* Configure the LCD with the OEM bits in NVM */
   4731 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4732 
   4733 	if (sc->sc_type == WM_T_PCH2) {
   4734 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4735 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4736 			delay(10 * 1000);
   4737 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4738 		}
   4739 		/* Set EEE LPI Update Timer to 200usec */
   4740 		rv = sc->phy.acquire(sc);
   4741 		if (rv)
   4742 			return rv;
   4743 		rv = wm_write_emi_reg_locked(dev,
   4744 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4745 		sc->phy.release(sc);
   4746 	}
   4747 
   4748 	return rv;
   4749 }
   4750 
   4751 /* Only for PCH and newer */
   4752 static int
   4753 wm_write_smbus_addr(struct wm_softc *sc)
   4754 {
   4755 	uint32_t strap, freq;
   4756 	uint16_t phy_data;
   4757 	int rv;
   4758 
   4759 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4760 		device_xname(sc->sc_dev), __func__));
   4761 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4762 
   4763 	strap = CSR_READ(sc, WMREG_STRAP);
   4764 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4765 
   4766 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4767 	if (rv != 0)
   4768 		return rv;
   4769 
   4770 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4771 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4772 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4773 
   4774 	if (sc->sc_phytype == WMPHY_I217) {
   4775 		/* Restore SMBus frequency */
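         		/*
         		 * A strap value of zero means no valid frequency;
         		 * otherwise the value minus one encodes the
         		 * low/high frequency bits.
         		 */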
    4776 		if (freq--) {
   4777 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4778 			    | HV_SMB_ADDR_FREQ_HIGH);
   4779 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4780 			    HV_SMB_ADDR_FREQ_LOW);
   4781 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4782 			    HV_SMB_ADDR_FREQ_HIGH);
   4783 		} else
   4784 			DPRINTF(sc, WM_DEBUG_INIT,
   4785 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4786 				device_xname(sc->sc_dev), __func__));
   4787 	}
   4788 
   4789 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4790 	    phy_data);
   4791 }
   4792 
   4793 static int
   4794 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4795 {
   4796 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4797 	uint16_t phy_page = 0;
   4798 	int rv = 0;
   4799 
   4800 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4801 		device_xname(sc->sc_dev), __func__));
   4802 
   4803 	switch (sc->sc_type) {
   4804 	case WM_T_ICH8:
   4805 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4806 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4807 			return 0;
   4808 
   4809 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4810 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4811 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4812 			break;
   4813 		}
   4814 		/* FALLTHROUGH */
   4815 	case WM_T_PCH:
   4816 	case WM_T_PCH2:
   4817 	case WM_T_PCH_LPT:
   4818 	case WM_T_PCH_SPT:
   4819 	case WM_T_PCH_CNP:
   4820 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4821 		break;
   4822 	default:
   4823 		return 0;
   4824 	}
   4825 
   4826 	if ((rv = sc->phy.acquire(sc)) != 0)
   4827 		return rv;
   4828 
   4829 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4830 	if ((reg & sw_cfg_mask) == 0)
   4831 		goto release;
   4832 
   4833 	/*
   4834 	 * Make sure HW does not configure LCD from PHY extended configuration
   4835 	 * before SW configuration
   4836 	 */
   4837 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4838 	if ((sc->sc_type < WM_T_PCH2)
   4839 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4840 		goto release;
   4841 
   4842 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4843 		device_xname(sc->sc_dev), __func__));
    4844 	/* The pointer is in dwords; convert it to a word address. */
   4845 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4846 
   4847 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4848 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4849 	if (cnf_size == 0)
   4850 		goto release;
   4851 
   4852 	if (((sc->sc_type == WM_T_PCH)
   4853 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4854 	    || (sc->sc_type > WM_T_PCH)) {
   4855 		/*
   4856 		 * HW configures the SMBus address and LEDs when the OEM and
   4857 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4858 		 * are cleared, SW will configure them instead.
   4859 		 */
   4860 		DPRINTF(sc, WM_DEBUG_INIT,
   4861 		    ("%s: %s: Configure SMBus and LED\n",
   4862 			device_xname(sc->sc_dev), __func__));
   4863 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4864 			goto release;
   4865 
   4866 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4867 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4868 		    (uint16_t)reg);
   4869 		if (rv != 0)
   4870 			goto release;
   4871 	}
   4872 
   4873 	/* Configure LCD from extended configuration region. */
   4874 	for (i = 0; i < cnf_size; i++) {
   4875 		uint16_t reg_data, reg_addr;
   4876 
   4877 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4878 			goto release;
   4879 
    4880 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1,
         		    &reg_addr) != 0)
   4881 			goto release;
   4882 
   4883 		if (reg_addr == IGPHY_PAGE_SELECT)
   4884 			phy_page = reg_data;
   4885 
   4886 		reg_addr &= IGPHY_MAXREGADDR;
   4887 		reg_addr |= phy_page;
   4888 
   4889 		KASSERT(sc->phy.writereg_locked != NULL);
   4890 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4891 		    reg_data);
   4892 	}
   4893 
   4894 release:
   4895 	sc->phy.release(sc);
   4896 	return rv;
   4897 }
   4898 
   4899 /*
   4900  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4901  *  @sc:       pointer to the HW structure
    4902  *  @d0_state: true if entering D0, false if entering D3
    4903  *
    4904  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
    4905  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4906  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4907  */
   4908 int
   4909 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4910 {
   4911 	uint32_t mac_reg;
   4912 	uint16_t oem_reg;
   4913 	int rv;
   4914 
   4915 	if (sc->sc_type < WM_T_PCH)
   4916 		return 0;
   4917 
   4918 	rv = sc->phy.acquire(sc);
   4919 	if (rv != 0)
   4920 		return rv;
   4921 
   4922 	if (sc->sc_type == WM_T_PCH) {
   4923 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4924 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4925 			goto release;
   4926 	}
   4927 
   4928 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4929 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4930 		goto release;
   4931 
   4932 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4933 
   4934 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4935 	if (rv != 0)
   4936 		goto release;
   4937 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4938 
   4939 	if (d0_state) {
   4940 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4941 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4942 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4943 			oem_reg |= HV_OEM_BITS_LPLU;
   4944 	} else {
   4945 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4946 		    != 0)
   4947 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4948 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4949 		    != 0)
   4950 			oem_reg |= HV_OEM_BITS_LPLU;
   4951 	}
   4952 
   4953 	/* Set Restart auto-neg to activate the bits */
   4954 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4955 	    && (wm_phy_resetisblocked(sc) == false))
   4956 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4957 
   4958 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4959 
   4960 release:
   4961 	sc->phy.release(sc);
   4962 
   4963 	return rv;
   4964 }
   4965 
   4966 /* Init hardware bits */
   4967 void
   4968 wm_initialize_hardware_bits(struct wm_softc *sc)
   4969 {
   4970 	uint32_t tarc0, tarc1, reg;
   4971 
   4972 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4973 		device_xname(sc->sc_dev), __func__));
   4974 
   4975 	/* For 82571 variant, 80003 and ICHs */
   4976 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4977 	    || (sc->sc_type >= WM_T_80003)) {
   4978 
   4979 		/* Transmit Descriptor Control 0 */
   4980 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4981 		reg |= TXDCTL_COUNT_DESC;
   4982 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4983 
   4984 		/* Transmit Descriptor Control 1 */
   4985 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4986 		reg |= TXDCTL_COUNT_DESC;
   4987 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4988 
   4989 		/* TARC0 */
   4990 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4991 		switch (sc->sc_type) {
   4992 		case WM_T_82571:
   4993 		case WM_T_82572:
   4994 		case WM_T_82573:
   4995 		case WM_T_82574:
   4996 		case WM_T_82583:
   4997 		case WM_T_80003:
   4998 			/* Clear bits 30..27 */
   4999 			tarc0 &= ~__BITS(30, 27);
   5000 			break;
   5001 		default:
   5002 			break;
   5003 		}
   5004 
   5005 		switch (sc->sc_type) {
   5006 		case WM_T_82571:
   5007 		case WM_T_82572:
   5008 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5009 
   5010 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5011 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5012 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5013 			/* 8257[12] Errata No.7 */
    5014 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5015 
   5016 			/* TARC1 bit 28 */
   5017 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5018 				tarc1 &= ~__BIT(28);
   5019 			else
   5020 				tarc1 |= __BIT(28);
   5021 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5022 
   5023 			/*
   5024 			 * 8257[12] Errata No.13
    5025 			 * Disable Dynamic Clock Gating.
   5026 			 */
   5027 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5028 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5029 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5030 			break;
   5031 		case WM_T_82573:
   5032 		case WM_T_82574:
   5033 		case WM_T_82583:
   5034 			if ((sc->sc_type == WM_T_82574)
   5035 			    || (sc->sc_type == WM_T_82583))
   5036 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5037 
   5038 			/* Extended Device Control */
   5039 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5040 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5041 			reg |= __BIT(22);	/* Set bit 22 */
   5042 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5043 
   5044 			/* Device Control */
   5045 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5046 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5047 
   5048 			/* PCIe Control Register */
   5049 			/*
   5050 			 * 82573 Errata (unknown).
   5051 			 *
   5052 			 * 82574 Errata 25 and 82583 Errata 12
   5053 			 * "Dropped Rx Packets":
    5054 			 *   NVM image version 2.1.4 and newer does not have this bug.
   5055 			 */
   5056 			reg = CSR_READ(sc, WMREG_GCR);
   5057 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5058 			CSR_WRITE(sc, WMREG_GCR, reg);
   5059 
   5060 			if ((sc->sc_type == WM_T_82574)
   5061 			    || (sc->sc_type == WM_T_82583)) {
   5062 				/*
   5063 				 * Document says this bit must be set for
   5064 				 * proper operation.
   5065 				 */
   5066 				reg = CSR_READ(sc, WMREG_GCR);
   5067 				reg |= __BIT(22);
   5068 				CSR_WRITE(sc, WMREG_GCR, reg);
   5069 
   5070 				/*
    5071 				 * Apply a workaround for a hardware erratum
    5072 				 * documented in the errata docs. It fixes an
    5073 				 * issue where some error-prone or unreliable
    5074 				 * PCIe completions occur, particularly with
    5075 				 * ASPM enabled. Without the fix, the issue
    5076 				 * can cause Tx timeouts.
   5077 				 */
   5078 				reg = CSR_READ(sc, WMREG_GCR2);
   5079 				reg |= __BIT(0);
   5080 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5081 			}
   5082 			break;
   5083 		case WM_T_80003:
   5084 			/* TARC0 */
   5085 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5086 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5087 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5088 
   5089 			/* TARC1 bit 28 */
   5090 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5091 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5092 				tarc1 &= ~__BIT(28);
   5093 			else
   5094 				tarc1 |= __BIT(28);
   5095 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5096 			break;
   5097 		case WM_T_ICH8:
   5098 		case WM_T_ICH9:
   5099 		case WM_T_ICH10:
   5100 		case WM_T_PCH:
   5101 		case WM_T_PCH2:
   5102 		case WM_T_PCH_LPT:
   5103 		case WM_T_PCH_SPT:
   5104 		case WM_T_PCH_CNP:
   5105 			/* TARC0 */
   5106 			if (sc->sc_type == WM_T_ICH8) {
   5107 				/* Set TARC0 bits 29 and 28 */
   5108 				tarc0 |= __BITS(29, 28);
   5109 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5110 				tarc0 |= __BIT(29);
   5111 				/*
   5112 				 *  Drop bit 28. From Linux.
   5113 				 * See I218/I219 spec update
   5114 				 * "5. Buffer Overrun While the I219 is
   5115 				 * Processing DMA Transactions"
   5116 				 */
   5117 				tarc0 &= ~__BIT(28);
   5118 			}
   5119 			/* Set TARC0 bits 23,24,26,27 */
   5120 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5121 
   5122 			/* CTRL_EXT */
   5123 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5124 			reg |= __BIT(22);	/* Set bit 22 */
   5125 			/*
   5126 			 * Enable PHY low-power state when MAC is at D3
   5127 			 * w/o WoL
   5128 			 */
   5129 			if (sc->sc_type >= WM_T_PCH)
   5130 				reg |= CTRL_EXT_PHYPDEN;
   5131 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5132 
   5133 			/* TARC1 */
   5134 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5135 			/* bit 28 */
   5136 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5137 				tarc1 &= ~__BIT(28);
   5138 			else
   5139 				tarc1 |= __BIT(28);
   5140 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5141 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5142 
   5143 			/* Device Status */
   5144 			if (sc->sc_type == WM_T_ICH8) {
   5145 				reg = CSR_READ(sc, WMREG_STATUS);
   5146 				reg &= ~__BIT(31);
   5147 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5149 			}
   5150 
   5151 			/* IOSFPC */
   5152 			if (sc->sc_type == WM_T_PCH_SPT) {
   5153 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5154 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5155 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5156 			}
   5157 			/*
    5158 			 * Work around a descriptor data corruption issue
    5159 			 * with NFS v2 UDP traffic by simply disabling the
    5160 			 * NFS filtering capability.
   5161 			 */
   5162 			reg = CSR_READ(sc, WMREG_RFCTL);
   5163 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5164 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5165 			break;
   5166 		default:
   5167 			break;
   5168 		}
   5169 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5170 
   5171 		switch (sc->sc_type) {
   5172 		case WM_T_82571:
   5173 		case WM_T_82572:
   5174 		case WM_T_82573:
   5175 		case WM_T_80003:
   5176 		case WM_T_ICH8:
   5177 			/*
    5178 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
    5179 			 * others: avoid the RSS Hash Value bug.
   5180 			 */
   5181 			reg = CSR_READ(sc, WMREG_RFCTL);
    5182 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5183 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5184 			break;
   5185 		case WM_T_82574:
    5186 			/* Use extended Rx descriptors. */
   5187 			reg = CSR_READ(sc, WMREG_RFCTL);
   5188 			reg |= WMREG_RFCTL_EXSTEN;
   5189 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5190 			break;
   5191 		default:
   5192 			break;
   5193 		}
   5194 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5195 		/*
   5196 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5197 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5198 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5199 		 * Correctly by the Device"
   5200 		 *
   5201 		 * I354(C2000) Errata AVR53:
   5202 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5203 		 * Hang"
   5204 		 */
   5205 		reg = CSR_READ(sc, WMREG_RFCTL);
   5206 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5207 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5208 	}
   5209 }
   5210 
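         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the encoded RXPBS size field of 82580 variants
          *	into the real packet buffer size via table lookup.
          *	Unknown encodings yield 0.
          */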
   5211 static uint32_t
   5212 wm_rxpbs_adjust_82580(uint32_t val)
   5213 {
   5214 	uint32_t rv = 0;
   5215 
   5216 	if (val < __arraycount(wm_82580_rxpbs_table))
   5217 		rv = wm_82580_rxpbs_table[val];
   5218 
   5219 	return rv;
   5220 }
   5221 
   5222 /*
   5223  * wm_reset_phy:
   5224  *
   5225  *	generic PHY reset function.
   5226  *	Same as e1000_phy_hw_reset_generic()
   5227  */
   5228 static int
   5229 wm_reset_phy(struct wm_softc *sc)
   5230 {
   5231 	uint32_t reg;
   5232 	int rv;
   5233 
   5234 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5235 		device_xname(sc->sc_dev), __func__));
   5236 	if (wm_phy_resetisblocked(sc))
   5237 		return -1;
   5238 
   5239 	rv = sc->phy.acquire(sc);
   5240 	if (rv) {
   5241 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5242 		    __func__, rv);
   5243 		return rv;
   5244 	}
   5245 
   5246 	reg = CSR_READ(sc, WMREG_CTRL);
   5247 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5248 	CSR_WRITE_FLUSH(sc);
   5249 
   5250 	delay(sc->phy.reset_delay_us);
   5251 
   5252 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5253 	CSR_WRITE_FLUSH(sc);
   5254 
   5255 	delay(150);
   5256 
   5257 	sc->phy.release(sc);
   5258 
   5259 	wm_get_cfg_done(sc);
   5260 	wm_phy_post_reset(sc);
   5261 
   5262 	return 0;
   5263 }
   5264 
   5265 /*
   5266  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5267  *
    5268  * On I219, the descriptor rings must be emptied before resetting the HW
   5269  * or before changing the device state to D3 during runtime (runtime PM).
   5270  *
   5271  * Failure to do this will cause the HW to enter a unit hang state which can
   5272  * only be released by PCI reset on the device.
   5273  *
   5274  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5275  */
   5276 static void
   5277 wm_flush_desc_rings(struct wm_softc *sc)
   5278 {
   5279 	pcireg_t preg;
   5280 	uint32_t reg;
   5281 	struct wm_txqueue *txq;
   5282 	wiseman_txdesc_t *txd;
   5283 	int nexttx;
   5284 	uint32_t rctl;
   5285 
   5286 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5287 
   5288 	/* First, disable MULR fix in FEXTNVM11 */
   5289 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5290 	reg |= FEXTNVM11_DIS_MULRFIX;
   5291 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5292 
   5293 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5294 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5295 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5296 		return;
   5297 
   5298 	/*
   5299 	 * Remove all descriptors from the tx_ring.
   5300 	 *
   5301 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5302 	 * happens when the HW reads the regs. We assign the ring itself as
    5303 	 * the data of the next descriptor. We don't care about the data;
    5304 	 * we are about to reset the HW.
   5305 	 */
   5306 #ifdef WM_DEBUG
   5307 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5308 #endif
   5309 	reg = CSR_READ(sc, WMREG_TCTL);
   5310 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5311 
   5312 	txq = &sc->sc_queue[0].wmq_txq;
   5313 	nexttx = txq->txq_next;
   5314 	txd = &txq->txq_descs[nexttx];
   5315 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5316 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
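         	/* The 512-byte length is arbitrary; the contents don't matter. */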
   5317 	txd->wtx_fields.wtxu_status = 0;
   5318 	txd->wtx_fields.wtxu_options = 0;
   5319 	txd->wtx_fields.wtxu_vlan = 0;
   5320 
   5321 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5322 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5323 
   5324 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5325 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5326 	CSR_WRITE_FLUSH(sc);
   5327 	delay(250);
   5328 
   5329 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5330 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5331 		return;
   5332 
   5333 	/*
   5334 	 * Mark all descriptors in the RX ring as consumed and disable the
   5335 	 * rx ring.
   5336 	 */
   5337 #ifdef WM_DEBUG
   5338 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5339 #endif
   5340 	rctl = CSR_READ(sc, WMREG_RCTL);
   5341 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5342 	CSR_WRITE_FLUSH(sc);
   5343 	delay(150);
   5344 
   5345 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5346 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5347 	reg &= 0xffffc000;
   5348 	/*
   5349 	 * Update thresholds: prefetch threshold to 31, host threshold
   5350 	 * to 1 and make sure the granularity is "descriptors" and not
   5351 	 * "cache lines"
   5352 	 */
   5353 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5354 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5355 
   5356 	/* Momentarily enable the RX ring for the changes to take effect */
   5357 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5358 	CSR_WRITE_FLUSH(sc);
   5359 	delay(150);
   5360 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5361 }
   5362 
   5363 /*
   5364  * wm_reset:
   5365  *
   5366  *	Reset the i82542 chip.
   5367  */
   5368 static void
   5369 wm_reset(struct wm_softc *sc)
   5370 {
   5371 	int phy_reset = 0;
   5372 	int i, error = 0;
   5373 	uint32_t reg;
   5374 	uint16_t kmreg;
   5375 	int rv;
   5376 
   5377 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5378 		device_xname(sc->sc_dev), __func__));
   5379 	KASSERT(sc->sc_type != 0);
   5380 
   5381 	/*
   5382 	 * Allocate on-chip memory according to the MTU size.
   5383 	 * The Packet Buffer Allocation register must be written
   5384 	 * before the chip is reset.
   5385 	 */
   5386 	switch (sc->sc_type) {
   5387 	case WM_T_82547:
   5388 	case WM_T_82547_2:
   5389 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5390 		    PBA_22K : PBA_30K;
   5391 		for (i = 0; i < sc->sc_nqueues; i++) {
   5392 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5393 			txq->txq_fifo_head = 0;
   5394 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5395 			txq->txq_fifo_size =
   5396 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5397 			txq->txq_fifo_stall = 0;
   5398 		}
   5399 		break;
   5400 	case WM_T_82571:
   5401 	case WM_T_82572:
    5402 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5403 	case WM_T_80003:
   5404 		sc->sc_pba = PBA_32K;
   5405 		break;
   5406 	case WM_T_82573:
   5407 		sc->sc_pba = PBA_12K;
   5408 		break;
   5409 	case WM_T_82574:
   5410 	case WM_T_82583:
   5411 		sc->sc_pba = PBA_20K;
   5412 		break;
   5413 	case WM_T_82576:
   5414 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5415 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5416 		break;
   5417 	case WM_T_82580:
   5418 	case WM_T_I350:
   5419 	case WM_T_I354:
   5420 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5421 		break;
   5422 	case WM_T_I210:
   5423 	case WM_T_I211:
   5424 		sc->sc_pba = PBA_34K;
   5425 		break;
   5426 	case WM_T_ICH8:
   5427 		/* Workaround for a bit corruption issue in FIFO memory */
   5428 		sc->sc_pba = PBA_8K;
   5429 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5430 		break;
   5431 	case WM_T_ICH9:
   5432 	case WM_T_ICH10:
   5433 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5434 		    PBA_14K : PBA_10K;
   5435 		break;
   5436 	case WM_T_PCH:
   5437 	case WM_T_PCH2:	/* XXX 14K? */
   5438 	case WM_T_PCH_LPT:
   5439 	case WM_T_PCH_SPT:
   5440 	case WM_T_PCH_CNP:
   5441 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5442 		    PBA_12K : PBA_26K;
   5443 		break;
   5444 	default:
   5445 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5446 		    PBA_40K : PBA_48K;
   5447 		break;
   5448 	}
   5449 	/*
   5450 	 * Only old or non-multiqueue devices have the PBA register
   5451 	 * XXX Need special handling for 82575.
   5452 	 */
   5453 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5454 	    || (sc->sc_type == WM_T_82575))
   5455 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5456 
   5457 	/* Prevent the PCI-E bus from sticking */
   5458 	if (sc->sc_flags & WM_F_PCIE) {
   5459 		int timeout = 800;
   5460 
   5461 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5462 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5463 
   5464 		while (timeout--) {
   5465 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5466 			    == 0)
   5467 				break;
   5468 			delay(100);
   5469 		}
   5470 		if (timeout == 0)
   5471 			device_printf(sc->sc_dev,
   5472 			    "failed to disable bus mastering\n");
   5473 	}
   5474 
   5475 	/* Set the completion timeout for interface */
   5476 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5477 	    || (sc->sc_type == WM_T_82580)
   5478 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5479 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5480 		wm_set_pcie_completion_timeout(sc);
   5481 
   5482 	/* Clear interrupt */
   5483 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5484 	if (wm_is_using_msix(sc)) {
   5485 		if (sc->sc_type != WM_T_82574) {
   5486 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5487 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5488 		} else
   5489 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5490 	}
   5491 
   5492 	/* Stop the transmit and receive processes. */
   5493 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5494 	sc->sc_rctl &= ~RCTL_EN;
   5495 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5496 	CSR_WRITE_FLUSH(sc);
   5497 
   5498 	/* XXX set_tbi_sbp_82543() */
   5499 
   5500 	delay(10*1000);
   5501 
   5502 	/* Must acquire the MDIO ownership before MAC reset */
   5503 	switch (sc->sc_type) {
   5504 	case WM_T_82573:
   5505 	case WM_T_82574:
   5506 	case WM_T_82583:
   5507 		error = wm_get_hw_semaphore_82573(sc);
   5508 		break;
   5509 	default:
   5510 		break;
   5511 	}
   5512 
   5513 	/*
   5514 	 * 82541 Errata 29? & 82547 Errata 28?
   5515 	 * See also the description about PHY_RST bit in CTRL register
   5516 	 * in 8254x_GBe_SDM.pdf.
   5517 	 */
   5518 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5519 		CSR_WRITE(sc, WMREG_CTRL,
   5520 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5521 		CSR_WRITE_FLUSH(sc);
   5522 		delay(5000);
   5523 	}
   5524 
   5525 	switch (sc->sc_type) {
   5526 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5527 	case WM_T_82541:
   5528 	case WM_T_82541_2:
   5529 	case WM_T_82547:
   5530 	case WM_T_82547_2:
   5531 		/*
   5532 		 * On some chipsets, a reset through a memory-mapped write
   5533 		 * cycle can cause the chip to reset before completing the
    5534 		 * write cycle. This causes a major headache that can be avoided
   5535 		 * by issuing the reset via indirect register writes through
   5536 		 * I/O space.
   5537 		 *
   5538 		 * So, if we successfully mapped the I/O BAR at attach time,
   5539 		 * use that. Otherwise, try our luck with a memory-mapped
   5540 		 * reset.
   5541 		 */
   5542 		if (sc->sc_flags & WM_F_IOH_VALID)
   5543 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5544 		else
   5545 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5546 		break;
   5547 	case WM_T_82545_3:
   5548 	case WM_T_82546_3:
   5549 		/* Use the shadow control register on these chips. */
   5550 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5551 		break;
   5552 	case WM_T_80003:
   5553 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5554 		if (sc->phy.acquire(sc) != 0)
   5555 			break;
   5556 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5557 		sc->phy.release(sc);
   5558 		break;
   5559 	case WM_T_ICH8:
   5560 	case WM_T_ICH9:
   5561 	case WM_T_ICH10:
   5562 	case WM_T_PCH:
   5563 	case WM_T_PCH2:
   5564 	case WM_T_PCH_LPT:
   5565 	case WM_T_PCH_SPT:
   5566 	case WM_T_PCH_CNP:
   5567 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5568 		if (wm_phy_resetisblocked(sc) == false) {
   5569 			/*
   5570 			 * Gate automatic PHY configuration by hardware on
   5571 			 * non-managed 82579
   5572 			 */
   5573 			if ((sc->sc_type == WM_T_PCH2)
   5574 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5575 				== 0))
   5576 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5577 
   5578 			reg |= CTRL_PHY_RESET;
   5579 			phy_reset = 1;
   5580 		} else
   5581 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5582 		if (sc->phy.acquire(sc) != 0)
   5583 			break;
   5584 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5585 		/* Don't insert a completion barrier during reset */
   5586 		delay(20*1000);
   5587 		/*
   5588 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5589 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5590 		 * only. See also wm_get_swflag_ich8lan().
   5591 		 */
   5592 		mutex_exit(sc->sc_ich_phymtx);
   5593 		break;
   5594 	case WM_T_82580:
   5595 	case WM_T_I350:
   5596 	case WM_T_I354:
   5597 	case WM_T_I210:
   5598 	case WM_T_I211:
   5599 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5600 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5601 			CSR_WRITE_FLUSH(sc);
   5602 		delay(5000);
   5603 		break;
   5604 	case WM_T_82542_2_0:
   5605 	case WM_T_82542_2_1:
   5606 	case WM_T_82543:
   5607 	case WM_T_82540:
   5608 	case WM_T_82545:
   5609 	case WM_T_82546:
   5610 	case WM_T_82571:
   5611 	case WM_T_82572:
   5612 	case WM_T_82573:
   5613 	case WM_T_82574:
   5614 	case WM_T_82575:
   5615 	case WM_T_82576:
   5616 	case WM_T_82583:
   5617 	default:
   5618 		/* Everything else can safely use the documented method. */
   5619 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5620 		break;
   5621 	}
   5622 
   5623 	/* Must release the MDIO ownership after MAC reset */
   5624 	switch (sc->sc_type) {
   5625 	case WM_T_82573:
   5626 	case WM_T_82574:
   5627 	case WM_T_82583:
   5628 		if (error == 0)
   5629 			wm_put_hw_semaphore_82573(sc);
   5630 		break;
   5631 	default:
   5632 		break;
   5633 	}
   5634 
   5635 	/* Set Phy Config Counter to 50msec */
   5636 	if (sc->sc_type == WM_T_PCH2) {
   5637 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5638 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5639 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5640 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5641 	}
   5642 
   5643 	if (phy_reset != 0)
   5644 		wm_get_cfg_done(sc);
   5645 
   5646 	/* Reload EEPROM */
   5647 	switch (sc->sc_type) {
   5648 	case WM_T_82542_2_0:
   5649 	case WM_T_82542_2_1:
   5650 	case WM_T_82543:
   5651 	case WM_T_82544:
   5652 		delay(10);
   5653 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5654 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5655 		CSR_WRITE_FLUSH(sc);
   5656 		delay(2000);
   5657 		break;
   5658 	case WM_T_82540:
   5659 	case WM_T_82545:
   5660 	case WM_T_82545_3:
   5661 	case WM_T_82546:
   5662 	case WM_T_82546_3:
   5663 		delay(5*1000);
   5664 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5665 		break;
   5666 	case WM_T_82541:
   5667 	case WM_T_82541_2:
   5668 	case WM_T_82547:
   5669 	case WM_T_82547_2:
   5670 		delay(20000);
   5671 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5672 		break;
   5673 	case WM_T_82571:
   5674 	case WM_T_82572:
   5675 	case WM_T_82573:
   5676 	case WM_T_82574:
   5677 	case WM_T_82583:
   5678 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5679 			delay(10);
   5680 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5681 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5682 			CSR_WRITE_FLUSH(sc);
   5683 		}
   5684 		/* check EECD_EE_AUTORD */
   5685 		wm_get_auto_rd_done(sc);
   5686 		/*
    5687 		 * PHY configuration from the NVM starts only after
    5688 		 * EECD_AUTO_RD is set.
   5689 		 */
   5690 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5691 		    || (sc->sc_type == WM_T_82583))
   5692 			delay(25*1000);
   5693 		break;
   5694 	case WM_T_82575:
   5695 	case WM_T_82576:
   5696 	case WM_T_82580:
   5697 	case WM_T_I350:
   5698 	case WM_T_I354:
   5699 	case WM_T_I210:
   5700 	case WM_T_I211:
   5701 	case WM_T_80003:
   5702 		/* check EECD_EE_AUTORD */
   5703 		wm_get_auto_rd_done(sc);
   5704 		break;
   5705 	case WM_T_ICH8:
   5706 	case WM_T_ICH9:
   5707 	case WM_T_ICH10:
   5708 	case WM_T_PCH:
   5709 	case WM_T_PCH2:
   5710 	case WM_T_PCH_LPT:
   5711 	case WM_T_PCH_SPT:
   5712 	case WM_T_PCH_CNP:
   5713 		break;
   5714 	default:
   5715 		panic("%s: unknown type\n", __func__);
   5716 	}
   5717 
   5718 	/* Check whether EEPROM is present or not */
   5719 	switch (sc->sc_type) {
   5720 	case WM_T_82575:
   5721 	case WM_T_82576:
   5722 	case WM_T_82580:
   5723 	case WM_T_I350:
   5724 	case WM_T_I354:
   5725 	case WM_T_ICH8:
   5726 	case WM_T_ICH9:
   5727 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5728 			/* Not found */
   5729 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5730 			if (sc->sc_type == WM_T_82575)
   5731 				wm_reset_init_script_82575(sc);
   5732 		}
   5733 		break;
   5734 	default:
   5735 		break;
   5736 	}
   5737 
   5738 	if (phy_reset != 0)
   5739 		wm_phy_post_reset(sc);
   5740 
   5741 	if ((sc->sc_type == WM_T_82580)
   5742 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5743 		/* Clear global device reset status bit */
   5744 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5745 	}
   5746 
   5747 	/* Clear any pending interrupt events. */
   5748 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5749 	reg = CSR_READ(sc, WMREG_ICR);
   5750 	if (wm_is_using_msix(sc)) {
   5751 		if (sc->sc_type != WM_T_82574) {
   5752 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5753 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5754 		} else
   5755 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5756 	}
   5757 
   5758 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5759 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5760 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5761 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5762 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5763 		reg |= KABGTXD_BGSQLBIAS;
   5764 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5765 	}
   5766 
   5767 	/* Reload sc_ctrl */
   5768 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5769 
   5770 	wm_set_eee(sc);
   5771 
   5772 	/*
   5773 	 * For PCH, this write will make sure that any noise will be detected
   5774 	 * as a CRC error and be dropped rather than show up as a bad packet
   5775 	 * to the DMA engine
   5776 	 */
   5777 	if (sc->sc_type == WM_T_PCH)
   5778 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5779 
   5780 	if (sc->sc_type >= WM_T_82544)
   5781 		CSR_WRITE(sc, WMREG_WUC, 0);
   5782 
   5783 	if (sc->sc_type < WM_T_82575)
   5784 		wm_disable_aspm(sc); /* Workaround for some chips */
   5785 
   5786 	wm_reset_mdicnfg_82580(sc);
   5787 
   5788 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5789 		wm_pll_workaround_i210(sc);
   5790 
   5791 	if (sc->sc_type == WM_T_80003) {
   5792 		/* Default to TRUE to enable the MDIC W/A */
   5793 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5794 
   5795 		rv = wm_kmrn_readreg(sc,
   5796 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5797 		if (rv == 0) {
   5798 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5799 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5800 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5801 			else
   5802 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5803 		}
   5804 	}
   5805 }
   5806 
   5807 /*
   5808  * wm_add_rxbuf:
   5809  *
    5810  *	Add a receive buffer to the indicated descriptor.
   5811  */
   5812 static int
   5813 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5814 {
   5815 	struct wm_softc *sc = rxq->rxq_sc;
   5816 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5817 	struct mbuf *m;
   5818 	int error;
   5819 
   5820 	KASSERT(mutex_owned(rxq->rxq_lock));
   5821 
   5822 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5823 	if (m == NULL)
   5824 		return ENOBUFS;
   5825 
   5826 	MCLGET(m, M_DONTWAIT);
   5827 	if ((m->m_flags & M_EXT) == 0) {
   5828 		m_freem(m);
   5829 		return ENOBUFS;
   5830 	}
   5831 
   5832 	if (rxs->rxs_mbuf != NULL)
   5833 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5834 
   5835 	rxs->rxs_mbuf = m;
   5836 
   5837 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5838 	/*
   5839 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5840 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5841 	 */
   5842 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5843 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5844 	if (error) {
   5845 		/* XXX XXX XXX */
   5846 		aprint_error_dev(sc->sc_dev,
   5847 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5848 		panic("wm_add_rxbuf");
   5849 	}
   5850 
   5851 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5852 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5853 
   5854 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5855 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5856 			wm_init_rxdesc(rxq, idx);
   5857 	} else
   5858 		wm_init_rxdesc(rxq, idx);
   5859 
   5860 	return 0;
   5861 }
   5862 
   5863 /*
   5864  * wm_rxdrain:
   5865  *
   5866  *	Drain the receive queue.
   5867  */
   5868 static void
   5869 wm_rxdrain(struct wm_rxqueue *rxq)
   5870 {
   5871 	struct wm_softc *sc = rxq->rxq_sc;
   5872 	struct wm_rxsoft *rxs;
   5873 	int i;
   5874 
   5875 	KASSERT(mutex_owned(rxq->rxq_lock));
   5876 
   5877 	for (i = 0; i < WM_NRXDESC; i++) {
   5878 		rxs = &rxq->rxq_soft[i];
   5879 		if (rxs->rxs_mbuf != NULL) {
   5880 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5881 			m_freem(rxs->rxs_mbuf);
   5882 			rxs->rxs_mbuf = NULL;
   5883 		}
   5884 	}
   5885 }
   5886 
   5887 /*
    5888  * Set up registers for RSS.
    5889  *
    5890  * XXX VMDq is not supported yet.
   5891  */
   5892 static void
   5893 wm_init_rss(struct wm_softc *sc)
   5894 {
   5895 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5896 	int i;
   5897 
   5898 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5899 
   5900 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5901 		unsigned int qid, reta_ent;
   5902 
   5903 		qid  = i % sc->sc_nqueues;
   5904 		switch (sc->sc_type) {
   5905 		case WM_T_82574:
   5906 			reta_ent = __SHIFTIN(qid,
   5907 			    RETA_ENT_QINDEX_MASK_82574);
   5908 			break;
   5909 		case WM_T_82575:
   5910 			reta_ent = __SHIFTIN(qid,
   5911 			    RETA_ENT_QINDEX1_MASK_82575);
   5912 			break;
   5913 		default:
   5914 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5915 			break;
   5916 		}
   5917 
   5918 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5919 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5920 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5921 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5922 	}
   5923 
   5924 	rss_getkey((uint8_t *)rss_key);
   5925 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5926 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5927 
   5928 	if (sc->sc_type == WM_T_82574)
   5929 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5930 	else
   5931 		mrqc = MRQC_ENABLE_RSS_MQ;
   5932 
   5933 	/*
   5934 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5935 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5936 	 */
   5937 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5938 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5939 #if 0
   5940 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5941 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5942 #endif
   5943 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5944 
   5945 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5946 }
   5947 
   5948 /*
    5949  * Adjust the number of TX and RX queues which the system actually uses.
    5950  *
    5951  * The numbers are affected by the following parameters:
    5952  *     - The number of hardware queues
   5953  *     - The number of MSI-X vectors (= "nvectors" argument)
   5954  *     - ncpu
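          *
          * As a worked example (the counts are hypothetical): an 82576
          * exposes 16 hardware queues; given 5 MSI-X vectors (one of
          * which is reserved for the link interrupt) on an 8-CPU system,
          * the code below settles on sc_nqueues = min(16, 5 - 1, 8) = 4.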
   5955  */
   5956 static void
   5957 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5958 {
   5959 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5960 
   5961 	if (nvectors < 2) {
   5962 		sc->sc_nqueues = 1;
   5963 		return;
   5964 	}
   5965 
   5966 	switch (sc->sc_type) {
   5967 	case WM_T_82572:
   5968 		hw_ntxqueues = 2;
   5969 		hw_nrxqueues = 2;
   5970 		break;
   5971 	case WM_T_82574:
   5972 		hw_ntxqueues = 2;
   5973 		hw_nrxqueues = 2;
   5974 		break;
   5975 	case WM_T_82575:
   5976 		hw_ntxqueues = 4;
   5977 		hw_nrxqueues = 4;
   5978 		break;
   5979 	case WM_T_82576:
   5980 		hw_ntxqueues = 16;
   5981 		hw_nrxqueues = 16;
   5982 		break;
   5983 	case WM_T_82580:
   5984 	case WM_T_I350:
   5985 	case WM_T_I354:
   5986 		hw_ntxqueues = 8;
   5987 		hw_nrxqueues = 8;
   5988 		break;
   5989 	case WM_T_I210:
   5990 		hw_ntxqueues = 4;
   5991 		hw_nrxqueues = 4;
   5992 		break;
   5993 	case WM_T_I211:
   5994 		hw_ntxqueues = 2;
   5995 		hw_nrxqueues = 2;
   5996 		break;
   5997 		/*
    5998 		 * The following Ethernet controllers do not support MSI-X,
    5999 		 * so this driver doesn't let them use multiqueue:
   6000 		 *     - WM_T_80003
   6001 		 *     - WM_T_ICH8
   6002 		 *     - WM_T_ICH9
   6003 		 *     - WM_T_ICH10
   6004 		 *     - WM_T_PCH
   6005 		 *     - WM_T_PCH2
   6006 		 *     - WM_T_PCH_LPT
   6007 		 */
   6008 	default:
   6009 		hw_ntxqueues = 1;
   6010 		hw_nrxqueues = 1;
   6011 		break;
   6012 	}
   6013 
   6014 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6015 
   6016 	/*
    6017 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    6018 	 * limit the number of queues actually used.
   6019 	 */
   6020 	if (nvectors < hw_nqueues + 1)
   6021 		sc->sc_nqueues = nvectors - 1;
   6022 	else
   6023 		sc->sc_nqueues = hw_nqueues;
   6024 
   6025 	/*
    6026 	 * Since more queues than CPUs cannot improve scaling, we limit
    6027 	 * the number of queues actually used.
   6028 	 */
   6029 	if (ncpu < sc->sc_nqueues)
   6030 		sc->sc_nqueues = ncpu;
   6031 }
   6032 
   6033 static inline bool
   6034 wm_is_using_msix(struct wm_softc *sc)
   6035 {
   6036 
   6037 	return (sc->sc_nintrs > 1);
   6038 }
   6039 
   6040 static inline bool
   6041 wm_is_using_multiqueue(struct wm_softc *sc)
   6042 {
   6043 
   6044 	return (sc->sc_nqueues > 1);
   6045 }
   6046 
   6047 static int
   6048 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6049 {
   6050 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6051 
   6052 	wmq->wmq_id = qidx;
   6053 	wmq->wmq_intr_idx = intr_idx;
   6054 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6055 	    wm_handle_queue, wmq);
   6056 	if (wmq->wmq_si != NULL)
   6057 		return 0;
   6058 
   6059 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6060 	    wmq->wmq_id);
   6061 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6062 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6063 	return ENOMEM;
   6064 }
   6065 
   6066 /*
    6067  * Both single-interrupt MSI and INTx can use this function.
   6068  */
   6069 static int
   6070 wm_setup_legacy(struct wm_softc *sc)
   6071 {
   6072 	pci_chipset_tag_t pc = sc->sc_pc;
   6073 	const char *intrstr = NULL;
   6074 	char intrbuf[PCI_INTRSTR_LEN];
   6075 	int error;
   6076 
   6077 	error = wm_alloc_txrx_queues(sc);
   6078 	if (error) {
   6079 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6080 		    error);
   6081 		return ENOMEM;
   6082 	}
   6083 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6084 	    sizeof(intrbuf));
   6085 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6086 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6087 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6088 	if (sc->sc_ihs[0] == NULL) {
   6089 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6090 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6091 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6092 		return ENOMEM;
   6093 	}
   6094 
   6095 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6096 	sc->sc_nintrs = 1;
   6097 
   6098 	return wm_softint_establish_queue(sc, 0, 0);
   6099 }
   6100 
   6101 static int
   6102 wm_setup_msix(struct wm_softc *sc)
   6103 {
   6104 	void *vih;
   6105 	kcpuset_t *affinity;
   6106 	int qidx, error, intr_idx, txrx_established;
   6107 	pci_chipset_tag_t pc = sc->sc_pc;
   6108 	const char *intrstr = NULL;
   6109 	char intrbuf[PCI_INTRSTR_LEN];
   6110 	char intr_xname[INTRDEVNAMEBUF];
   6111 
   6112 	if (sc->sc_nqueues < ncpu) {
   6113 		/*
    6114 		 * To avoid colliding with other devices' interrupts, the
    6115 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   6116 		 */
   6117 		sc->sc_affinity_offset = 1;
   6118 	} else {
   6119 		/*
    6120 		 * In this case, this device uses all CPUs, so for readability
    6121 		 * we make each affinitized cpu_index equal its MSI-X vector number.
   6122 		 */
   6123 		sc->sc_affinity_offset = 0;
   6124 	}
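         	/*
         	 * For example (hypothetical counts): with 4 queues on an
         	 * 8-CPU system, sc_affinity_offset is 1, so the loop below
         	 * binds the Tx/Rx vectors round-robin to CPU#1..CPU#4,
         	 * leaving CPU#0 to other devices.
         	 */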
   6125 
   6126 	error = wm_alloc_txrx_queues(sc);
   6127 	if (error) {
   6128 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6129 		    error);
   6130 		return ENOMEM;
   6131 	}
   6132 
   6133 	kcpuset_create(&affinity, false);
   6134 	intr_idx = 0;
   6135 
   6136 	/*
   6137 	 * TX and RX
   6138 	 */
   6139 	txrx_established = 0;
   6140 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6141 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6142 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6143 
   6144 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6145 		    sizeof(intrbuf));
   6146 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6147 		    PCI_INTR_MPSAFE, true);
   6148 		memset(intr_xname, 0, sizeof(intr_xname));
   6149 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6150 		    device_xname(sc->sc_dev), qidx);
   6151 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6152 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6153 		if (vih == NULL) {
   6154 			aprint_error_dev(sc->sc_dev,
   6155 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6156 			    intrstr ? " at " : "",
   6157 			    intrstr ? intrstr : "");
   6158 
   6159 			goto fail;
   6160 		}
   6161 		kcpuset_zero(affinity);
   6162 		/* Round-robin affinity */
   6163 		kcpuset_set(affinity, affinity_to);
   6164 		error = interrupt_distribute(vih, affinity, NULL);
   6165 		if (error == 0) {
   6166 			aprint_normal_dev(sc->sc_dev,
   6167 			    "for TX and RX interrupting at %s affinity to %u\n",
   6168 			    intrstr, affinity_to);
   6169 		} else {
   6170 			aprint_normal_dev(sc->sc_dev,
   6171 			    "for TX and RX interrupting at %s\n", intrstr);
   6172 		}
   6173 		sc->sc_ihs[intr_idx] = vih;
   6174 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6175 			goto fail;
   6176 		txrx_established++;
   6177 		intr_idx++;
   6178 	}
   6179 
   6180 	/* LINK */
   6181 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6182 	    sizeof(intrbuf));
   6183 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6184 	memset(intr_xname, 0, sizeof(intr_xname));
   6185 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6186 	    device_xname(sc->sc_dev));
   6187 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6188 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6189 	if (vih == NULL) {
   6190 		aprint_error_dev(sc->sc_dev,
   6191 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6192 		    intrstr ? " at " : "",
   6193 		    intrstr ? intrstr : "");
   6194 
   6195 		goto fail;
   6196 	}
    6197 	/* Keep the default affinity for the LINK interrupt. */
   6198 	aprint_normal_dev(sc->sc_dev,
   6199 	    "for LINK interrupting at %s\n", intrstr);
   6200 	sc->sc_ihs[intr_idx] = vih;
   6201 	sc->sc_link_intr_idx = intr_idx;
   6202 
   6203 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6204 	kcpuset_destroy(affinity);
   6205 	return 0;
   6206 
   6207 fail:
   6208 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6209 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6210 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6211 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6212 	}
   6213 
   6214 	kcpuset_destroy(affinity);
   6215 	return ENOMEM;
   6216 }
   6217 
   6218 static void
   6219 wm_unset_stopping_flags(struct wm_softc *sc)
   6220 {
   6221 	int i;
   6222 
   6223 	KASSERT(mutex_owned(sc->sc_core_lock));
   6224 
   6225 	/* Must unset stopping flags in ascending order. */
   6226 	for (i = 0; i < sc->sc_nqueues; i++) {
   6227 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6228 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6229 
   6230 		mutex_enter(txq->txq_lock);
   6231 		txq->txq_stopping = false;
   6232 		mutex_exit(txq->txq_lock);
   6233 
   6234 		mutex_enter(rxq->rxq_lock);
   6235 		rxq->rxq_stopping = false;
   6236 		mutex_exit(rxq->rxq_lock);
   6237 	}
   6238 
   6239 	sc->sc_core_stopping = false;
   6240 }
   6241 
   6242 static void
   6243 wm_set_stopping_flags(struct wm_softc *sc)
   6244 {
   6245 	int i;
   6246 
   6247 	KASSERT(mutex_owned(sc->sc_core_lock));
   6248 
   6249 	sc->sc_core_stopping = true;
   6250 
   6251 	/* Must set stopping flags in ascending order. */
   6252 	for (i = 0; i < sc->sc_nqueues; i++) {
   6253 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6254 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6255 
   6256 		mutex_enter(rxq->rxq_lock);
   6257 		rxq->rxq_stopping = true;
   6258 		mutex_exit(rxq->rxq_lock);
   6259 
   6260 		mutex_enter(txq->txq_lock);
   6261 		txq->txq_stopping = true;
   6262 		mutex_exit(txq->txq_lock);
   6263 	}
   6264 }
   6265 
   6266 /*
    6267  * Write the interrupt interval value to the ITR or EITR register.
   6268  */
   6269 static void
   6270 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6271 {
   6272 
   6273 	if (!wmq->wmq_set_itr)
   6274 		return;
   6275 
   6276 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6277 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6278 
   6279 		/*
    6280 		 * The 82575 doesn't have the CNT_INGR field,
    6281 		 * so we overwrite the counter field in software.
   6282 		 */
   6283 		if (sc->sc_type == WM_T_82575)
   6284 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6285 			    EITR_COUNTER_MASK_82575);
   6286 		else
   6287 			eitr |= EITR_CNT_INGR;
   6288 
   6289 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6290 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6291 		/*
    6292 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6293 		 * the multiqueue function with MSI-X.
   6294 		 */
   6295 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6296 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6297 	} else {
   6298 		KASSERT(wmq->wmq_id == 0);
   6299 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6300 	}
   6301 
   6302 	wmq->wmq_set_itr = false;
   6303 }
   6304 
   6305 /*
   6306  * TODO
    6307  * The dynamic ITR calculation below is almost the same as Linux's igb,
    6308  * but it does not fit wm(4) well, so AIM is kept disabled until we
    6309  * find an appropriate ITR calculation for wm(4).
   6310  */
   6311 /*
    6312  * Calculate the interrupt interval value to be written to the register
    6313  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6314  */
   6315 static void
   6316 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6317 {
   6318 #ifdef NOTYET
   6319 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6320 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6321 	uint32_t avg_size = 0;
   6322 	uint32_t new_itr;
   6323 
   6324 	if (rxq->rxq_packets)
   6325 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6326 	if (txq->txq_packets)
   6327 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6328 
   6329 	if (avg_size == 0) {
   6330 		new_itr = 450; /* restore default value */
   6331 		goto out;
   6332 	}
   6333 
   6334 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6335 	avg_size += 24;
   6336 
   6337 	/* Don't starve jumbo frames */
   6338 	avg_size = uimin(avg_size, 3000);
   6339 
   6340 	/* Give a little boost to mid-size frames */
   6341 	if ((avg_size > 300) && (avg_size < 1200))
   6342 		new_itr = avg_size / 3;
   6343 	else
   6344 		new_itr = avg_size / 2;
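         	/*
         	 * E.g. (illustrative numbers): an 800-byte average packet
         	 * becomes 824 after the 24-byte overhead, takes the mid-size
         	 * boost, and yields new_itr = 824 / 3 = 274 here; it is then
         	 * quadrupled below except on the 82575.
         	 */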
   6345 
   6346 out:
   6347 	/*
    6348 	 * The usage of the 82574's and 82575's EITR differs from other NEWQUEUE
    6349 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
   6350 	 */
   6351 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6352 		new_itr *= 4;
   6353 
   6354 	if (new_itr != wmq->wmq_itr) {
   6355 		wmq->wmq_itr = new_itr;
   6356 		wmq->wmq_set_itr = true;
   6357 	} else
   6358 		wmq->wmq_set_itr = false;
   6359 
   6360 	rxq->rxq_packets = 0;
   6361 	rxq->rxq_bytes = 0;
   6362 	txq->txq_packets = 0;
   6363 	txq->txq_bytes = 0;
   6364 #endif
   6365 }
   6366 
   6367 static void
   6368 wm_init_sysctls(struct wm_softc *sc)
   6369 {
   6370 	struct sysctllog **log;
   6371 	const struct sysctlnode *rnode, *qnode, *cnode;
   6372 	int i, rv;
   6373 	const char *dvname;
   6374 
   6375 	log = &sc->sc_sysctllog;
   6376 	dvname = device_xname(sc->sc_dev);
   6377 
   6378 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6379 	    0, CTLTYPE_NODE, dvname,
   6380 	    SYSCTL_DESCR("wm information and settings"),
   6381 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6382 	if (rv != 0)
   6383 		goto err;
   6384 
   6385 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6386 	    CTLTYPE_BOOL, "txrx_workqueue",
   6387 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6388 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6389 	if (rv != 0)
   6390 		goto teardown;
   6391 
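         	/*
         	 * The per-queue nodes created below surface under the
         	 * hw.<ifname> sysctl tree; e.g. (the unit number here is
         	 * hypothetical) "sysctl hw.wm0.q0.txq_free" reads the first
         	 * queue's count of free Tx descriptors.
         	 */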
   6392 	for (i = 0; i < sc->sc_nqueues; i++) {
   6393 		struct wm_queue *wmq = &sc->sc_queue[i];
   6394 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6395 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6396 
   6397 		snprintf(sc->sc_queue[i].sysctlname,
   6398 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6399 
   6400 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6401 		    0, CTLTYPE_NODE,
   6402 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6403 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6404 			break;
   6405 
   6406 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6407 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6408 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6409 		    NULL, 0, &txq->txq_free,
   6410 		    0, CTL_CREATE, CTL_EOL) != 0)
   6411 			break;
   6412 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6413 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6414 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6415 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6416 		    0, CTL_CREATE, CTL_EOL) != 0)
   6417 			break;
   6418 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6419 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6420 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6421 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6422 		    0, CTL_CREATE, CTL_EOL) != 0)
   6423 			break;
   6424 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6425 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6426 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6427 		    NULL, 0, &txq->txq_next,
   6428 		    0, CTL_CREATE, CTL_EOL) != 0)
   6429 			break;
   6430 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6431 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6432 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6433 		    NULL, 0, &txq->txq_sfree,
   6434 		    0, CTL_CREATE, CTL_EOL) != 0)
   6435 			break;
   6436 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6437 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6438 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6439 		    NULL, 0, &txq->txq_snext,
   6440 		    0, CTL_CREATE, CTL_EOL) != 0)
   6441 			break;
   6442 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6443 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6444 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6445 		    NULL, 0, &txq->txq_sdirty,
   6446 		    0, CTL_CREATE, CTL_EOL) != 0)
   6447 			break;
   6448 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6449 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6450 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6451 		    NULL, 0, &txq->txq_flags,
   6452 		    0, CTL_CREATE, CTL_EOL) != 0)
   6453 			break;
   6454 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6455 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6456 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6457 		    NULL, 0, &txq->txq_stopping,
   6458 		    0, CTL_CREATE, CTL_EOL) != 0)
   6459 			break;
   6460 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6461 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6462 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6463 		    NULL, 0, &txq->txq_sending,
   6464 		    0, CTL_CREATE, CTL_EOL) != 0)
   6465 			break;
   6466 
   6467 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6468 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6469 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6470 		    NULL, 0, &rxq->rxq_ptr,
   6471 		    0, CTL_CREATE, CTL_EOL) != 0)
   6472 			break;
   6473 	}
   6474 
   6475 #ifdef WM_DEBUG
   6476 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6477 	    CTLTYPE_INT, "debug_flags",
   6478 	    SYSCTL_DESCR(
   6479 		    "Debug flags:\n"	\
   6480 		    "\t0x01 LINK\n"	\
   6481 		    "\t0x02 TX\n"	\
   6482 		    "\t0x04 RX\n"	\
   6483 		    "\t0x08 GMII\n"	\
   6484 		    "\t0x10 MANAGE\n"	\
   6485 		    "\t0x20 NVM\n"	\
   6486 		    "\t0x40 INIT\n"	\
   6487 		    "\t0x80 LOCK"),
   6488 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6489 	if (rv != 0)
   6490 		goto teardown;
   6491 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6492 	    CTLTYPE_BOOL, "trigger_reset",
   6493 	    SYSCTL_DESCR("Trigger an interface reset"),
   6494 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6495 	if (rv != 0)
   6496 		goto teardown;
   6497 #endif
   6498 
   6499 	return;
   6500 
   6501 teardown:
   6502 	sysctl_teardown(log);
   6503 err:
   6504 	sc->sc_sysctllog = NULL;
   6505 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6506 	    __func__, rv);
   6507 }
   6508 
   6509 /*
   6510  * wm_init:		[ifnet interface function]
   6511  *
   6512  *	Initialize the interface.
   6513  */
   6514 static int
   6515 wm_init(struct ifnet *ifp)
   6516 {
   6517 	struct wm_softc *sc = ifp->if_softc;
   6518 	int ret;
   6519 
   6520 	KASSERT(IFNET_LOCKED(ifp));
   6521 
   6522 	if (sc->sc_dying)
   6523 		return ENXIO;
   6524 
   6525 	mutex_enter(sc->sc_core_lock);
   6526 	ret = wm_init_locked(ifp);
   6527 	mutex_exit(sc->sc_core_lock);
   6528 
   6529 	return ret;
   6530 }
   6531 
   6532 static int
   6533 wm_init_locked(struct ifnet *ifp)
   6534 {
   6535 	struct wm_softc *sc = ifp->if_softc;
   6536 	struct ethercom *ec = &sc->sc_ethercom;
   6537 	int i, j, trynum, error = 0;
   6538 	uint32_t reg, sfp_mask = 0;
   6539 
   6540 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6541 		device_xname(sc->sc_dev), __func__));
   6542 	KASSERT(IFNET_LOCKED(ifp));
   6543 	KASSERT(mutex_owned(sc->sc_core_lock));
   6544 
   6545 	/*
    6546 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6547 	 * There is a small but measurable benefit to avoiding the adjustment
   6548 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6549 	 * on such platforms.  One possibility is that the DMA itself is
   6550 	 * slightly more efficient if the front of the entire packet (instead
   6551 	 * of the front of the headers) is aligned.
   6552 	 *
   6553 	 * Note we must always set align_tweak to 0 if we are using
   6554 	 * jumbo frames.
   6555 	 */
   6556 #ifdef __NO_STRICT_ALIGNMENT
   6557 	sc->sc_align_tweak = 0;
   6558 #else
   6559 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6560 		sc->sc_align_tweak = 0;
   6561 	else
   6562 		sc->sc_align_tweak = 2;
   6563 #endif /* __NO_STRICT_ALIGNMENT */
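         	/*
         	 * A brief worked note on the tweak above: the Ethernet header
         	 * is 14 bytes, so offsetting the receive buffer by
         	 * align_tweak == 2 lands the IP header that follows it on a
         	 * 4-byte boundary, as strict-alignment platforms require.
         	 */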
   6564 
   6565 	/* Cancel any pending I/O. */
   6566 	wm_stop_locked(ifp, false, false);
   6567 
   6568 	/* Update statistics before reset */
   6569 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6570 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6571 
   6572 	/* >= PCH_SPT hardware workaround before reset. */
   6573 	if (sc->sc_type >= WM_T_PCH_SPT)
   6574 		wm_flush_desc_rings(sc);
   6575 
   6576 	/* Reset the chip to a known state. */
   6577 	wm_reset(sc);
   6578 
   6579 	/*
    6580 	 * AMT based hardware can now take control from firmware.
    6581 	 * Do this after reset.
   6582 	 */
   6583 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6584 		wm_get_hw_control(sc);
   6585 
   6586 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6587 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6588 		wm_legacy_irq_quirk_spt(sc);
   6589 
   6590 	/* Init hardware bits */
   6591 	wm_initialize_hardware_bits(sc);
   6592 
   6593 	/* Reset the PHY. */
   6594 	if (sc->sc_flags & WM_F_HAS_MII)
   6595 		wm_gmii_reset(sc);
   6596 
   6597 	if (sc->sc_type >= WM_T_ICH8) {
   6598 		reg = CSR_READ(sc, WMREG_GCR);
   6599 		/*
    6600 		 * The ICH8 no-snoop bits have opposite polarity. Set to snoop by
   6601 		 * default after reset.
   6602 		 */
   6603 		if (sc->sc_type == WM_T_ICH8)
   6604 			reg |= GCR_NO_SNOOP_ALL;
   6605 		else
   6606 			reg &= ~GCR_NO_SNOOP_ALL;
   6607 		CSR_WRITE(sc, WMREG_GCR, reg);
   6608 	}
   6609 
   6610 	if ((sc->sc_type >= WM_T_ICH8)
   6611 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6612 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6613 
   6614 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6615 		reg |= CTRL_EXT_RO_DIS;
   6616 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6617 	}
   6618 
   6619 	/* Calculate (E)ITR value */
   6620 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6621 		/*
    6622 		 * This is for NEWQUEUE's EITR (except for the 82575).
    6623 		 * The 82575's EITR should be set to the same throttling value
    6624 		 * as the other old controllers' ITR because the interrupt/sec
    6625 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6626 		 *
    6627 		 * The 82574's EITR should be set to the same value as the ITR.
    6628 		 *
    6629 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    6630 		 * contrast to the ITR throttling value.
   6631 		 */
   6632 		sc->sc_itr_init = 450;
   6633 	} else if (sc->sc_type >= WM_T_82543) {
   6634 		/*
   6635 		 * Set up the interrupt throttling register (units of 256ns)
   6636 		 * Note that a footnote in Intel's documentation says this
   6637 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6638 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6639 		 * that that is also true for the 1024ns units of the other
   6640 		 * interrupt-related timer registers -- so, really, we ought
   6641 		 * to divide this value by 4 when the link speed is low.
   6642 		 *
   6643 		 * XXX implement this division at link speed change!
   6644 		 */
   6645 
   6646 		/*
   6647 		 * For N interrupts/sec, set this value to:
   6648 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6649 		 * absolute and packet timer values to this value
   6650 		 * divided by 4 to get "simple timer" behavior.
   6651 		 */
   6652 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6653 	}
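         	/*
         	 * To make the arithmetic above concrete: with the legacy ITR
         	 * in 256ns units, the value 1500 yields
         	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, and a
         	 * NEWQUEUE EITR value of 450 corresponds to roughly
         	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         	 */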
   6654 
   6655 	error = wm_init_txrx_queues(sc);
   6656 	if (error)
   6657 		goto out;
   6658 
   6659 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6660 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6661 	    (sc->sc_type >= WM_T_82575))
   6662 		wm_serdes_power_up_link_82575(sc);
   6663 
   6664 	/* Clear out the VLAN table -- we don't use it (yet). */
   6665 	CSR_WRITE(sc, WMREG_VET, 0);
   6666 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6667 		trynum = 10; /* Due to hw errata */
   6668 	else
   6669 		trynum = 1;
   6670 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6671 		for (j = 0; j < trynum; j++)
   6672 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6673 
   6674 	/*
   6675 	 * Set up flow-control parameters.
   6676 	 *
   6677 	 * XXX Values could probably stand some tuning.
   6678 	 */
   6679 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6680 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6681 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6682 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6683 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6684 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6685 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6686 	}
   6687 
   6688 	sc->sc_fcrtl = FCRTL_DFLT;
   6689 	if (sc->sc_type < WM_T_82543) {
   6690 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6691 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6692 	} else {
   6693 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6694 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6695 	}
   6696 
   6697 	if (sc->sc_type == WM_T_80003)
   6698 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6699 	else
   6700 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6701 
   6702 	/* Writes the control register. */
   6703 	wm_set_vlan(sc);
   6704 
   6705 	if (sc->sc_flags & WM_F_HAS_MII) {
   6706 		uint16_t kmreg;
   6707 
   6708 		switch (sc->sc_type) {
   6709 		case WM_T_80003:
   6710 		case WM_T_ICH8:
   6711 		case WM_T_ICH9:
   6712 		case WM_T_ICH10:
   6713 		case WM_T_PCH:
   6714 		case WM_T_PCH2:
   6715 		case WM_T_PCH_LPT:
   6716 		case WM_T_PCH_SPT:
   6717 		case WM_T_PCH_CNP:
   6718 			/*
   6719 			 * Set the mac to wait the maximum time between each
   6720 			 * iteration and increase the max iterations when
   6721 			 * polling the phy; this fixes erroneous timeouts at
   6722 			 * 10Mbps.
   6723 			 */
   6724 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6725 			    0xFFFF);
   6726 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6727 			    &kmreg);
   6728 			kmreg |= 0x3F;
   6729 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6730 			    kmreg);
   6731 			break;
   6732 		default:
   6733 			break;
   6734 		}
   6735 
   6736 		if (sc->sc_type == WM_T_80003) {
   6737 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6738 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6739 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6740 
   6741 			/* Bypass RX and TX FIFOs */
   6742 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6743 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6744 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6745 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6746 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6747 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6748 		}
   6749 	}
   6750 #if 0
   6751 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6752 #endif
   6753 
   6754 	/* Set up checksum offload parameters. */
   6755 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6756 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6757 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6758 		reg |= RXCSUM_IPOFL;
   6759 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6760 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6761 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6762 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6763 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6764 
   6765 	/* Set registers about MSI-X */
   6766 	if (wm_is_using_msix(sc)) {
   6767 		uint32_t ivar, qintr_idx;
   6768 		struct wm_queue *wmq;
   6769 		unsigned int qid;
   6770 
   6771 		if (sc->sc_type == WM_T_82575) {
   6772 			/* Interrupt control */
   6773 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6774 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6775 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6776 
   6777 			/* TX and RX */
   6778 			for (i = 0; i < sc->sc_nqueues; i++) {
   6779 				wmq = &sc->sc_queue[i];
   6780 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6781 				    EITR_TX_QUEUE(wmq->wmq_id)
   6782 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6783 			}
   6784 			/* Link status */
   6785 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6786 			    EITR_OTHER);
   6787 		} else if (sc->sc_type == WM_T_82574) {
   6788 			/* Interrupt control */
   6789 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6790 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6791 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6792 
   6793 			/*
    6794 			 * Work around an issue with spurious interrupts
    6795 			 * in MSI-X mode.
    6796 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6797 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6798 			 */
   6799 			reg = CSR_READ(sc, WMREG_RFCTL);
   6800 			reg |= WMREG_RFCTL_ACKDIS;
   6801 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6802 
   6803 			ivar = 0;
   6804 			/* TX and RX */
   6805 			for (i = 0; i < sc->sc_nqueues; i++) {
   6806 				wmq = &sc->sc_queue[i];
   6807 				qid = wmq->wmq_id;
   6808 				qintr_idx = wmq->wmq_intr_idx;
   6809 
   6810 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6811 				    IVAR_TX_MASK_Q_82574(qid));
   6812 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6813 				    IVAR_RX_MASK_Q_82574(qid));
   6814 			}
   6815 			/* Link status */
   6816 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6817 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6818 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6819 		} else {
   6820 			/* Interrupt control */
   6821 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6822 			    | GPIE_EIAME | GPIE_PBA);
   6823 
   6824 			switch (sc->sc_type) {
   6825 			case WM_T_82580:
   6826 			case WM_T_I350:
   6827 			case WM_T_I354:
   6828 			case WM_T_I210:
   6829 			case WM_T_I211:
   6830 				/* TX and RX */
   6831 				for (i = 0; i < sc->sc_nqueues; i++) {
   6832 					wmq = &sc->sc_queue[i];
   6833 					qid = wmq->wmq_id;
   6834 					qintr_idx = wmq->wmq_intr_idx;
   6835 
   6836 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6837 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6838 					ivar |= __SHIFTIN((qintr_idx
   6839 						| IVAR_VALID),
   6840 					    IVAR_TX_MASK_Q(qid));
   6841 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6842 					ivar |= __SHIFTIN((qintr_idx
   6843 						| IVAR_VALID),
   6844 					    IVAR_RX_MASK_Q(qid));
   6845 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6846 				}
   6847 				break;
   6848 			case WM_T_82576:
   6849 				/* TX and RX */
   6850 				for (i = 0; i < sc->sc_nqueues; i++) {
   6851 					wmq = &sc->sc_queue[i];
   6852 					qid = wmq->wmq_id;
   6853 					qintr_idx = wmq->wmq_intr_idx;
   6854 
   6855 					ivar = CSR_READ(sc,
   6856 					    WMREG_IVAR_Q_82576(qid));
   6857 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6858 					ivar |= __SHIFTIN((qintr_idx
   6859 						| IVAR_VALID),
   6860 					    IVAR_TX_MASK_Q_82576(qid));
   6861 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6862 					ivar |= __SHIFTIN((qintr_idx
   6863 						| IVAR_VALID),
   6864 					    IVAR_RX_MASK_Q_82576(qid));
   6865 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6866 					    ivar);
   6867 				}
   6868 				break;
   6869 			default:
   6870 				break;
   6871 			}
   6872 
   6873 			/* Link status */
   6874 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6875 			    IVAR_MISC_OTHER);
   6876 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6877 		}
   6878 
   6879 		if (wm_is_using_multiqueue(sc)) {
   6880 			wm_init_rss(sc);
   6881 
   6882 			/*
    6883 			 * NOTE: Receive Full-Packet Checksum Offload
    6884 			 * is mutually exclusive with Multiqueue. However,
    6885 			 * this is not the same as TCP/IP checksum offload,
    6886 			 * which still works.
    6887 			 */
   6888 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6889 			reg |= RXCSUM_PCSD;
   6890 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6891 		}
   6892 	}
   6893 
   6894 	/* Set up the interrupt registers. */
   6895 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6896 
   6897 	/* Enable SFP module insertion interrupt if it's required */
   6898 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6899 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6900 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6901 		sfp_mask = ICR_GPI(0);
   6902 	}
   6903 
   6904 	if (wm_is_using_msix(sc)) {
   6905 		uint32_t mask;
   6906 		struct wm_queue *wmq;
   6907 
   6908 		switch (sc->sc_type) {
   6909 		case WM_T_82574:
   6910 			mask = 0;
   6911 			for (i = 0; i < sc->sc_nqueues; i++) {
   6912 				wmq = &sc->sc_queue[i];
   6913 				mask |= ICR_TXQ(wmq->wmq_id);
   6914 				mask |= ICR_RXQ(wmq->wmq_id);
   6915 			}
   6916 			mask |= ICR_OTHER;
   6917 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6918 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6919 			break;
   6920 		default:
   6921 			if (sc->sc_type == WM_T_82575) {
   6922 				mask = 0;
   6923 				for (i = 0; i < sc->sc_nqueues; i++) {
   6924 					wmq = &sc->sc_queue[i];
   6925 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6926 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6927 				}
   6928 				mask |= EITR_OTHER;
   6929 			} else {
   6930 				mask = 0;
   6931 				for (i = 0; i < sc->sc_nqueues; i++) {
   6932 					wmq = &sc->sc_queue[i];
   6933 					mask |= 1 << wmq->wmq_intr_idx;
   6934 				}
   6935 				mask |= 1 << sc->sc_link_intr_idx;
   6936 			}
   6937 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6938 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6939 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6940 
   6941 			/* For other interrupts */
   6942 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6943 			break;
   6944 		}
   6945 	} else {
   6946 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6947 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6948 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6949 	}
   6950 
   6951 	/* Set up the inter-packet gap. */
   6952 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6953 
   6954 	if (sc->sc_type >= WM_T_82543) {
   6955 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6956 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6957 			wm_itrs_writereg(sc, wmq);
   6958 		}
   6959 		/*
    6960 		 * Link interrupts occur much less frequently than TX
    6961 		 * and RX interrupts, so we don't tune the
    6962 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6963 		 * FreeBSD's if_igb does.
   6964 		 */
   6965 	}
   6966 
   6967 	/* Set the VLAN EtherType. */
   6968 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6969 
   6970 	/*
   6971 	 * Set up the transmit control register; we start out with
   6972 	 * a collision distance suitable for FDX, but update it when
   6973 	 * we resolve the media type.
   6974 	 */
   6975 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6976 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6977 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6978 	if (sc->sc_type >= WM_T_82571)
   6979 		sc->sc_tctl |= TCTL_MULR;
   6980 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6981 
   6982 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6983 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6984 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6985 	}
   6986 
   6987 	if (sc->sc_type == WM_T_80003) {
   6988 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6989 		reg &= ~TCTL_EXT_GCEX_MASK;
   6990 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6991 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6992 	}
   6993 
   6994 	/* Set the media. */
   6995 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6996 		goto out;
   6997 
   6998 	/* Configure for OS presence */
   6999 	wm_init_manageability(sc);
   7000 
   7001 	/*
   7002 	 * Set up the receive control register; we actually program the
   7003 	 * register when we set the receive filter. Use multicast address
   7004 	 * offset type 0.
   7005 	 *
   7006 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7007 	 * don't enable that feature.
   7008 	 */
   7009 	sc->sc_mchash_type = 0;
   7010 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7011 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7012 
    7013 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   7014 	if (sc->sc_type == WM_T_82574)
   7015 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7016 
   7017 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7018 		sc->sc_rctl |= RCTL_SECRC;
   7019 
   7020 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7021 	    && (ifp->if_mtu > ETHERMTU)) {
   7022 		sc->sc_rctl |= RCTL_LPE;
   7023 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7024 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7025 	}
   7026 
   7027 	if (MCLBYTES == 2048)
   7028 		sc->sc_rctl |= RCTL_2k;
   7029 	else {
   7030 		if (sc->sc_type >= WM_T_82543) {
   7031 			switch (MCLBYTES) {
   7032 			case 4096:
   7033 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7034 				break;
   7035 			case 8192:
   7036 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7037 				break;
   7038 			case 16384:
   7039 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7040 				break;
   7041 			default:
   7042 				panic("wm_init: MCLBYTES %d unsupported",
   7043 				    MCLBYTES);
   7044 				break;
   7045 			}
   7046 		} else
   7047 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7048 	}
   7049 
   7050 	/* Enable ECC */
   7051 	switch (sc->sc_type) {
   7052 	case WM_T_82571:
   7053 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7054 		reg |= PBA_ECC_CORR_EN;
   7055 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7056 		break;
   7057 	case WM_T_PCH_LPT:
   7058 	case WM_T_PCH_SPT:
   7059 	case WM_T_PCH_CNP:
   7060 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7061 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7062 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7063 
   7064 		sc->sc_ctrl |= CTRL_MEHE;
   7065 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7066 		break;
   7067 	default:
   7068 		break;
   7069 	}
   7070 
   7071 	/*
   7072 	 * Set the receive filter.
   7073 	 *
   7074 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7075 	 * the setting of RCTL.EN in wm_set_filter()
   7076 	 */
   7077 	wm_set_filter(sc);
   7078 
    7079 	/* On the 82575 and later, set RDT only if RX is enabled. */
   7080 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7081 		int qidx;
   7082 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7083 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7084 			for (i = 0; i < WM_NRXDESC; i++) {
   7085 				mutex_enter(rxq->rxq_lock);
   7086 				wm_init_rxdesc(rxq, i);
   7087 				mutex_exit(rxq->rxq_lock);
   7088 
   7089 			}
   7090 		}
   7091 	}
   7092 
   7093 	wm_unset_stopping_flags(sc);
   7094 
   7095 	/* Start the one second link check clock. */
   7096 	callout_schedule(&sc->sc_tick_ch, hz);
   7097 
   7098 	/*
   7099 	 * ...all done! (IFNET_LOCKED asserted above.)
   7100 	 */
   7101 	ifp->if_flags |= IFF_RUNNING;
   7102 
   7103 out:
   7104 	/* Save last flags for the callback */
   7105 	sc->sc_if_flags = ifp->if_flags;
   7106 	sc->sc_ec_capenable = ec->ec_capenable;
   7107 	if (error)
   7108 		log(LOG_ERR, "%s: interface not running\n",
   7109 		    device_xname(sc->sc_dev));
   7110 	return error;
   7111 }
   7112 
   7113 /*
   7114  * wm_stop:		[ifnet interface function]
   7115  *
   7116  *	Stop transmission on the interface.
   7117  */
   7118 static void
   7119 wm_stop(struct ifnet *ifp, int disable)
   7120 {
   7121 	struct wm_softc *sc = ifp->if_softc;
   7122 
   7123 	ASSERT_SLEEPABLE();
   7124 	KASSERT(IFNET_LOCKED(ifp));
   7125 
   7126 	mutex_enter(sc->sc_core_lock);
   7127 	wm_stop_locked(ifp, disable ? true : false, true);
   7128 	mutex_exit(sc->sc_core_lock);
   7129 
   7130 	/*
   7131 	 * After wm_set_stopping_flags(), it is guaranteed that
   7132 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7133 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7134 	 * because it can sleep,
    7135 	 * so call workqueue_wait() here instead.
   7136 	 */
   7137 	for (int i = 0; i < sc->sc_nqueues; i++)
   7138 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7139 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7140 }
   7141 
   7142 static void
   7143 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7144 {
   7145 	struct wm_softc *sc = ifp->if_softc;
   7146 	struct wm_txsoft *txs;
   7147 	int i, qidx;
   7148 
   7149 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7150 		device_xname(sc->sc_dev), __func__));
   7151 	KASSERT(IFNET_LOCKED(ifp));
   7152 	KASSERT(mutex_owned(sc->sc_core_lock));
   7153 
   7154 	wm_set_stopping_flags(sc);
   7155 
   7156 	if (sc->sc_flags & WM_F_HAS_MII) {
   7157 		/* Down the MII. */
   7158 		mii_down(&sc->sc_mii);
   7159 	} else {
   7160 #if 0
   7161 		/* Should we clear PHY's status properly? */
   7162 		wm_reset(sc);
   7163 #endif
   7164 	}
   7165 
   7166 	/* Stop the transmit and receive processes. */
   7167 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7168 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7169 	sc->sc_rctl &= ~RCTL_EN;
   7170 
   7171 	/*
   7172 	 * Clear the interrupt mask to ensure the device cannot assert its
   7173 	 * interrupt line.
   7174 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7175 	 * service any currently pending or shared interrupt.
   7176 	 */
   7177 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7178 	sc->sc_icr = 0;
   7179 	if (wm_is_using_msix(sc)) {
   7180 		if (sc->sc_type != WM_T_82574) {
   7181 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7182 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7183 		} else
   7184 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7185 	}
   7186 
   7187 	/*
   7188 	 * Stop callouts after interrupts are disabled; if we have
   7189 	 * to wait for them, we will be releasing the CORE_LOCK
   7190 	 * briefly, which will unblock interrupts on the current CPU.
   7191 	 */
   7192 
   7193 	/* Stop the one second clock. */
   7194 	if (wait)
   7195 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7196 	else
   7197 		callout_stop(&sc->sc_tick_ch);
   7198 
   7199 	/* Stop the 82547 Tx FIFO stall check timer. */
   7200 	if (sc->sc_type == WM_T_82547) {
   7201 		if (wait)
   7202 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7203 		else
   7204 			callout_stop(&sc->sc_txfifo_ch);
   7205 	}
   7206 
   7207 	/* Release any queued transmit buffers. */
   7208 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7209 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7210 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7211 		struct mbuf *m;
   7212 
   7213 		mutex_enter(txq->txq_lock);
   7214 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7215 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7216 			txs = &txq->txq_soft[i];
   7217 			if (txs->txs_mbuf != NULL) {
   7218 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7219 				m_freem(txs->txs_mbuf);
   7220 				txs->txs_mbuf = NULL;
   7221 			}
   7222 		}
   7223 		/* Drain txq_interq */
   7224 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7225 			m_freem(m);
   7226 		mutex_exit(txq->txq_lock);
   7227 	}
   7228 
   7229 	/* Mark the interface as down and cancel the watchdog timer. */
   7230 	ifp->if_flags &= ~IFF_RUNNING;
   7231 	sc->sc_if_flags = ifp->if_flags;
   7232 
   7233 	if (disable) {
   7234 		for (i = 0; i < sc->sc_nqueues; i++) {
   7235 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7236 			mutex_enter(rxq->rxq_lock);
   7237 			wm_rxdrain(rxq);
   7238 			mutex_exit(rxq->rxq_lock);
   7239 		}
   7240 	}
   7241 
   7242 #if 0 /* notyet */
   7243 	if (sc->sc_type >= WM_T_82544)
   7244 		CSR_WRITE(sc, WMREG_WUC, 0);
   7245 #endif
   7246 }
   7247 
   7248 static void
   7249 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7250 {
   7251 	struct mbuf *m;
   7252 	int i;
   7253 
   7254 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7255 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7256 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7257 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7258 		    m->m_data, m->m_len, m->m_flags);
   7259 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7260 	    i, i == 1 ? "" : "s");
   7261 }
   7262 
   7263 /*
   7264  * wm_82547_txfifo_stall:
   7265  *
   7266  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7267  *	reset the FIFO pointers, and restart packet transmission.
   7268  */
   7269 static void
   7270 wm_82547_txfifo_stall(void *arg)
   7271 {
   7272 	struct wm_softc *sc = arg;
   7273 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7274 
   7275 	mutex_enter(txq->txq_lock);
   7276 
   7277 	if (txq->txq_stopping)
   7278 		goto out;
   7279 
   7280 	if (txq->txq_fifo_stall) {
   7281 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7282 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7283 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7284 			/*
   7285 			 * Packets have drained.  Stop transmitter, reset
   7286 			 * FIFO pointers, restart transmitter, and kick
   7287 			 * the packet queue.
   7288 			 */
   7289 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7290 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7291 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7292 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7293 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7294 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7295 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7296 			CSR_WRITE_FLUSH(sc);
   7297 
   7298 			txq->txq_fifo_head = 0;
   7299 			txq->txq_fifo_stall = 0;
   7300 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7301 		} else {
   7302 			/*
   7303 			 * Still waiting for packets to drain; try again in
   7304 			 * another tick.
   7305 			 */
   7306 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7307 		}
   7308 	}
   7309 
   7310 out:
   7311 	mutex_exit(txq->txq_lock);
   7312 }
   7313 
   7314 /*
   7315  * wm_82547_txfifo_bugchk:
   7316  *
   7317  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7318  *	prevent enqueueing a packet that would wrap around the end
    7319  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7320  *
   7321  *	We do this by checking the amount of space before the end
   7322  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7323  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7324  *	the internal FIFO pointers to the beginning, and restart
   7325  *	transmission on the interface.
   7326  */
   7327 #define	WM_FIFO_HDR		0x10
   7328 #define	WM_82547_PAD_LEN	0x3e0
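         /*
          * A worked instance of the check below (the free-space figure is
          * made up): a 1518-byte frame gives
          * len = roundup(1518 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536; with 1000
          * bytes of space left before the end of the FIFO,
          * 1536 < WM_82547_PAD_LEN (992) + 1000, so the frame is sent and
          * txq_fifo_head simply advances.  A longer packet, or less space,
          * would take the stall path instead.
          */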
   7329 static int
   7330 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7331 {
   7332 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7333 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7334 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7335 
   7336 	/* Just return if already stalled. */
   7337 	if (txq->txq_fifo_stall)
   7338 		return 1;
   7339 
   7340 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7341 		/* Stall only occurs in half-duplex mode. */
   7342 		goto send_packet;
   7343 	}
   7344 
   7345 	if (len >= WM_82547_PAD_LEN + space) {
   7346 		txq->txq_fifo_stall = 1;
   7347 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7348 		return 1;
   7349 	}
   7350 
   7351 send_packet:
   7352 	txq->txq_fifo_head += len;
   7353 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7354 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7355 
   7356 	return 0;
   7357 }
   7358 
   7359 static int
   7360 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7361 {
   7362 	int error;
   7363 
   7364 	/*
   7365 	 * Allocate the control data structures, and create and load the
   7366 	 * DMA map for it.
   7367 	 *
   7368 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7369 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7370 	 * both sets within the same 4G segment.
   7371 	 */
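         	/*
         	 * The 4G constraint is enforced by the 0x100000000ULL boundary
         	 * argument passed to bus_dmamem_alloc() below, which keeps the
         	 * allocation from crossing a 4GB boundary.
         	 */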
   7372 	if (sc->sc_type < WM_T_82544)
   7373 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7374 	else
   7375 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7376 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7377 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7378 	else
   7379 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7380 
   7381 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7382 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7383 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7384 		aprint_error_dev(sc->sc_dev,
   7385 		    "unable to allocate TX control data, error = %d\n",
   7386 		    error);
   7387 		goto fail_0;
   7388 	}
   7389 
   7390 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7391 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7392 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7393 		aprint_error_dev(sc->sc_dev,
   7394 		    "unable to map TX control data, error = %d\n", error);
   7395 		goto fail_1;
   7396 	}
   7397 
   7398 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7399 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7400 		aprint_error_dev(sc->sc_dev,
   7401 		    "unable to create TX control data DMA map, error = %d\n",
   7402 		    error);
   7403 		goto fail_2;
   7404 	}
   7405 
   7406 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7407 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7408 		aprint_error_dev(sc->sc_dev,
   7409 		    "unable to load TX control data DMA map, error = %d\n",
   7410 		    error);
   7411 		goto fail_3;
   7412 	}
   7413 
   7414 	return 0;
   7415 
   7416 fail_3:
   7417 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7418 fail_2:
   7419 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7420 	    WM_TXDESCS_SIZE(txq));
   7421 fail_1:
   7422 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7423 fail_0:
   7424 	return error;
   7425 }
   7426 
   7427 static void
   7428 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7429 {
   7430 
   7431 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7432 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7433 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7434 	    WM_TXDESCS_SIZE(txq));
   7435 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7436 }
   7437 
   7438 static int
   7439 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7440 {
   7441 	int error;
   7442 	size_t rxq_descs_size;
   7443 
   7444 	/*
   7445 	 * Allocate the control data structures, and create and load the
   7446 	 * DMA map for it.
   7447 	 *
   7448 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7449 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7450 	 * both sets within the same 4G segment.
   7451 	 */
   7452 	rxq->rxq_ndesc = WM_NRXDESC;
   7453 	if (sc->sc_type == WM_T_82574)
   7454 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7455 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7456 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7457 	else
   7458 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7459 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7460 
   7461 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7462 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7463 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7464 		aprint_error_dev(sc->sc_dev,
   7465 		    "unable to allocate RX control data, error = %d\n",
   7466 		    error);
   7467 		goto fail_0;
   7468 	}
   7469 
   7470 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7471 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7472 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7473 		aprint_error_dev(sc->sc_dev,
   7474 		    "unable to map RX control data, error = %d\n", error);
   7475 		goto fail_1;
   7476 	}
   7477 
   7478 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7479 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7480 		aprint_error_dev(sc->sc_dev,
   7481 		    "unable to create RX control data DMA map, error = %d\n",
   7482 		    error);
   7483 		goto fail_2;
   7484 	}
   7485 
   7486 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7487 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7488 		aprint_error_dev(sc->sc_dev,
   7489 		    "unable to load RX control data DMA map, error = %d\n",
   7490 		    error);
   7491 		goto fail_3;
   7492 	}
   7493 
   7494 	return 0;
   7495 
   7496  fail_3:
   7497 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7498  fail_2:
   7499 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7500 	    rxq_descs_size);
   7501  fail_1:
   7502 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7503  fail_0:
   7504 	return error;
   7505 }
   7506 
   7507 static void
   7508 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7509 {
   7510 
   7511 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7512 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7513 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7514 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7515 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7516 }
   7517 
   7518 
   7519 static int
   7520 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7521 {
   7522 	int i, error;
   7523 
   7524 	/* Create the transmit buffer DMA maps. */
   7525 	WM_TXQUEUELEN(txq) =
   7526 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7527 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7528 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7529 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7530 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7531 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7532 			aprint_error_dev(sc->sc_dev,
   7533 			    "unable to create Tx DMA map %d, error = %d\n",
   7534 			    i, error);
   7535 			goto fail;
   7536 		}
   7537 	}
   7538 
   7539 	return 0;
   7540 
   7541 fail:
   7542 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7543 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7544 			bus_dmamap_destroy(sc->sc_dmat,
   7545 			    txq->txq_soft[i].txs_dmamap);
   7546 	}
   7547 	return error;
   7548 }
   7549 
   7550 static void
   7551 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7552 {
   7553 	int i;
   7554 
   7555 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7556 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7557 			bus_dmamap_destroy(sc->sc_dmat,
   7558 			    txq->txq_soft[i].txs_dmamap);
   7559 	}
   7560 }
   7561 
   7562 static int
   7563 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7564 {
   7565 	int i, error;
   7566 
   7567 	/* Create the receive buffer DMA maps. */
   7568 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7569 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7570 			    MCLBYTES, 0, 0,
   7571 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7572 			aprint_error_dev(sc->sc_dev,
   7573 			    "unable to create Rx DMA map %d error = %d\n",
   7574 			    i, error);
   7575 			goto fail;
   7576 		}
   7577 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7578 	}
   7579 
   7580 	return 0;
   7581 
   7582  fail:
   7583 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7584 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7585 			bus_dmamap_destroy(sc->sc_dmat,
   7586 			    rxq->rxq_soft[i].rxs_dmamap);
   7587 	}
   7588 	return error;
   7589 }
   7590 
   7591 static void
   7592 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7593 {
   7594 	int i;
   7595 
   7596 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7597 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7598 			bus_dmamap_destroy(sc->sc_dmat,
   7599 			    rxq->rxq_soft[i].rxs_dmamap);
   7600 	}
   7601 }
   7602 
   7603 /*
    7604  * wm_alloc_txrx_queues:
   7605  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7606  */
   7607 static int
   7608 wm_alloc_txrx_queues(struct wm_softc *sc)
   7609 {
   7610 	int i, error, tx_done, rx_done;
   7611 
   7612 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7613 	    KM_SLEEP);
   7614 	if (sc->sc_queue == NULL) {
    7615 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7616 		error = ENOMEM;
   7617 		goto fail_0;
   7618 	}
   7619 
   7620 	/* For transmission */
   7621 	error = 0;
   7622 	tx_done = 0;
   7623 	for (i = 0; i < sc->sc_nqueues; i++) {
   7624 #ifdef WM_EVENT_COUNTERS
   7625 		int j;
   7626 		const char *xname;
   7627 #endif
   7628 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7629 		txq->txq_sc = sc;
   7630 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7631 
   7632 		error = wm_alloc_tx_descs(sc, txq);
   7633 		if (error)
   7634 			break;
   7635 		error = wm_alloc_tx_buffer(sc, txq);
   7636 		if (error) {
   7637 			wm_free_tx_descs(sc, txq);
   7638 			break;
   7639 		}
   7640 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7641 		if (txq->txq_interq == NULL) {
   7642 			wm_free_tx_descs(sc, txq);
   7643 			wm_free_tx_buffer(sc, txq);
   7644 			error = ENOMEM;
   7645 			break;
   7646 		}
   7647 
   7648 #ifdef WM_EVENT_COUNTERS
   7649 		xname = device_xname(sc->sc_dev);
   7650 
   7651 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7652 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7653 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7654 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7655 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7656 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7657 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7658 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7659 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7660 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7661 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7662 
   7663 		for (j = 0; j < WM_NTXSEGS; j++) {
   7664 			snprintf(txq->txq_txseg_evcnt_names[j],
   7665 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7666 			    "txq%02dtxseg%d", i, j);
   7667 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7668 			    EVCNT_TYPE_MISC,
   7669 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7670 		}
   7671 
   7672 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7673 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7674 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7675 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7676 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7677 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7678 #endif /* WM_EVENT_COUNTERS */
   7679 
   7680 		tx_done++;
   7681 	}
   7682 	if (error)
   7683 		goto fail_1;
   7684 
   7685 	/* For receive */
   7686 	error = 0;
   7687 	rx_done = 0;
   7688 	for (i = 0; i < sc->sc_nqueues; i++) {
   7689 #ifdef WM_EVENT_COUNTERS
   7690 		const char *xname;
   7691 #endif
   7692 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7693 		rxq->rxq_sc = sc;
   7694 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7695 
   7696 		error = wm_alloc_rx_descs(sc, rxq);
   7697 		if (error)
   7698 			break;
   7699 
   7700 		error = wm_alloc_rx_buffer(sc, rxq);
   7701 		if (error) {
   7702 			wm_free_rx_descs(sc, rxq);
   7703 			break;
   7704 		}
   7705 
   7706 #ifdef WM_EVENT_COUNTERS
   7707 		xname = device_xname(sc->sc_dev);
   7708 
   7709 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7710 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7711 
   7712 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7713 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7714 #endif /* WM_EVENT_COUNTERS */
   7715 
   7716 		rx_done++;
   7717 	}
   7718 	if (error)
   7719 		goto fail_2;
   7720 
   7721 	return 0;
   7722 
   7723 fail_2:
   7724 	for (i = 0; i < rx_done; i++) {
   7725 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7726 		wm_free_rx_buffer(sc, rxq);
   7727 		wm_free_rx_descs(sc, rxq);
   7728 		if (rxq->rxq_lock)
   7729 			mutex_obj_free(rxq->rxq_lock);
   7730 	}
   7731 fail_1:
   7732 	for (i = 0; i < tx_done; i++) {
   7733 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7734 		pcq_destroy(txq->txq_interq);
   7735 		wm_free_tx_buffer(sc, txq);
   7736 		wm_free_tx_descs(sc, txq);
   7737 		if (txq->txq_lock)
   7738 			mutex_obj_free(txq->txq_lock);
   7739 	}
   7740 
   7741 	kmem_free(sc->sc_queue,
   7742 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7743 fail_0:
   7744 	return error;
   7745 }
   7746 
   7747 /*
    7748  * wm_free_txrx_queues:
   7749  *	Free {tx,rx}descs and {tx,rx} buffers
   7750  */
   7751 static void
   7752 wm_free_txrx_queues(struct wm_softc *sc)
   7753 {
   7754 	int i;
   7755 
   7756 	for (i = 0; i < sc->sc_nqueues; i++) {
   7757 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7758 
   7759 #ifdef WM_EVENT_COUNTERS
   7760 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7761 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7762 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7763 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7764 #endif /* WM_EVENT_COUNTERS */
   7765 
   7766 		wm_free_rx_buffer(sc, rxq);
   7767 		wm_free_rx_descs(sc, rxq);
   7768 		if (rxq->rxq_lock)
   7769 			mutex_obj_free(rxq->rxq_lock);
   7770 	}
   7771 
   7772 	for (i = 0; i < sc->sc_nqueues; i++) {
   7773 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7774 		struct mbuf *m;
   7775 #ifdef WM_EVENT_COUNTERS
   7776 		int j;
   7777 
   7778 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7779 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7780 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7781 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7782 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7783 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7784 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7785 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7786 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7787 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7788 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7789 
   7790 		for (j = 0; j < WM_NTXSEGS; j++)
   7791 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7792 
   7793 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7794 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7795 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7796 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7797 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7798 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7799 #endif /* WM_EVENT_COUNTERS */
   7800 
   7801 		/* Drain txq_interq */
   7802 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7803 			m_freem(m);
   7804 		pcq_destroy(txq->txq_interq);
   7805 
   7806 		wm_free_tx_buffer(sc, txq);
   7807 		wm_free_tx_descs(sc, txq);
   7808 		if (txq->txq_lock)
   7809 			mutex_obj_free(txq->txq_lock);
   7810 	}
   7811 
   7812 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7813 }
   7814 
   7815 static void
   7816 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7817 {
   7818 
   7819 	KASSERT(mutex_owned(txq->txq_lock));
   7820 
   7821 	/* Initialize the transmit descriptor ring. */
   7822 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7823 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7824 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7825 	txq->txq_free = WM_NTXDESC(txq);
   7826 	txq->txq_next = 0;
   7827 }
   7828 
   7829 static void
   7830 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7831     struct wm_txqueue *txq)
   7832 {
   7833 
   7834 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7835 		device_xname(sc->sc_dev), __func__));
   7836 	KASSERT(mutex_owned(txq->txq_lock));
   7837 
   7838 	if (sc->sc_type < WM_T_82543) {
   7839 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7840 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7841 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7842 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7843 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7844 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7845 	} else {
   7846 		int qid = wmq->wmq_id;
   7847 
   7848 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7849 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7850 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7851 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7852 
   7853 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7854 			/*
   7855 			 * Don't write TDT before TCTL.EN is set.
    7856 			 * See the datasheet.
   7857 			 */
   7858 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7859 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7860 			    | TXDCTL_WTHRESH(0));
   7861 		else {
   7862 			/* XXX should update with AIM? */
   7863 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7864 			if (sc->sc_type >= WM_T_82540) {
    7865 				/* Should be the same as TIDV */
   7866 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7867 			}
   7868 
   7869 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7870 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7871 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7872 		}
   7873 	}
   7874 }
   7875 
   7876 static void
   7877 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7878 {
   7879 	int i;
   7880 
   7881 	KASSERT(mutex_owned(txq->txq_lock));
   7882 
   7883 	/* Initialize the transmit job descriptors. */
   7884 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7885 		txq->txq_soft[i].txs_mbuf = NULL;
   7886 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7887 	txq->txq_snext = 0;
   7888 	txq->txq_sdirty = 0;
   7889 }
   7890 
   7891 static void
   7892 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7893     struct wm_txqueue *txq)
   7894 {
   7895 
   7896 	KASSERT(mutex_owned(txq->txq_lock));
   7897 
   7898 	/*
   7899 	 * Set up some register offsets that are different between
   7900 	 * the i82542 and the i82543 and later chips.
   7901 	 */
   7902 	if (sc->sc_type < WM_T_82543)
   7903 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7904 	else
   7905 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7906 
   7907 	wm_init_tx_descs(sc, txq);
   7908 	wm_init_tx_regs(sc, wmq, txq);
   7909 	wm_init_tx_buffer(sc, txq);
   7910 
    7911 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD */
   7912 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7913 
   7914 	txq->txq_sending = false;
   7915 }
   7916 
   7917 static void
   7918 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7919     struct wm_rxqueue *rxq)
   7920 {
   7921 
   7922 	KASSERT(mutex_owned(rxq->rxq_lock));
   7923 
   7924 	/*
   7925 	 * Initialize the receive descriptor and receive job
   7926 	 * descriptor rings.
   7927 	 */
   7928 	if (sc->sc_type < WM_T_82543) {
   7929 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7930 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7931 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7932 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7933 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7934 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7935 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7936 
   7937 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7938 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7939 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7940 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7941 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7942 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7943 	} else {
   7944 		int qid = wmq->wmq_id;
   7945 
   7946 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7947 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7948 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7949 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7950 
   7951 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7952 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7953 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7954 				    "or higher\n", __func__, MCLBYTES);
   7955 
   7956 			/*
   7957 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7958 			 * only.
   7959 			 */
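         			/*
         			 * The BSIZEPKT field is in 1 KB units (hence
         			 * the power-of-two check above); e.g. the
         			 * usual MCLBYTES of 2048 programs a 2 KB
         			 * packet buffer.
         			 */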
   7960 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7961 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7962 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7963 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7964 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7965 			    | RXDCTL_WTHRESH(1));
   7966 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7967 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7968 		} else {
   7969 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7970 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7971 			/* XXX should update with AIM? */
   7972 			CSR_WRITE(sc, WMREG_RDTR,
   7973 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7974 			/* MUST be the same value as RDTR */
   7975 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7976 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7977 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7978 		}
   7979 	}
   7980 }
   7981 
   7982 static int
   7983 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7984 {
   7985 	struct wm_rxsoft *rxs;
   7986 	int error, i;
   7987 
   7988 	KASSERT(mutex_owned(rxq->rxq_lock));
   7989 
   7990 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7991 		rxs = &rxq->rxq_soft[i];
   7992 		if (rxs->rxs_mbuf == NULL) {
   7993 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7994 				log(LOG_ERR, "%s: unable to allocate or map "
   7995 				    "rx buffer %d, error = %d\n",
   7996 				    device_xname(sc->sc_dev), i, error);
   7997 				/*
   7998 				 * XXX Should attempt to run with fewer receive
   7999 				 * XXX buffers instead of just failing.
   8000 				 */
   8001 				wm_rxdrain(rxq);
   8002 				return ENOMEM;
   8003 			}
   8004 		} else {
   8005 			/*
   8006 			 * For 82575 and 82576, the RX descriptors must be
   8007 			 * initialized after the setting of RCTL.EN in
   8008 			 * wm_set_filter()
   8009 			 */
   8010 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8011 				wm_init_rxdesc(rxq, i);
   8012 		}
   8013 	}
   8014 	rxq->rxq_ptr = 0;
   8015 	rxq->rxq_discard = 0;
   8016 	WM_RXCHAIN_RESET(rxq);
   8017 
   8018 	return 0;
   8019 }
   8020 
   8021 static int
   8022 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8023     struct wm_rxqueue *rxq)
   8024 {
   8025 
   8026 	KASSERT(mutex_owned(rxq->rxq_lock));
   8027 
   8028 	/*
   8029 	 * Set up some register offsets that are different between
   8030 	 * the i82542 and the i82543 and later chips.
   8031 	 */
   8032 	if (sc->sc_type < WM_T_82543)
   8033 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8034 	else
   8035 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8036 
   8037 	wm_init_rx_regs(sc, wmq, rxq);
   8038 	return wm_init_rx_buffer(sc, rxq);
   8039 }
   8040 
   8041 /*
    8042  * wm_init_txrx_queues:
   8043  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8044  */
   8045 static int
   8046 wm_init_txrx_queues(struct wm_softc *sc)
   8047 {
   8048 	int i, error = 0;
   8049 
   8050 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8051 		device_xname(sc->sc_dev), __func__));
   8052 
   8053 	for (i = 0; i < sc->sc_nqueues; i++) {
   8054 		struct wm_queue *wmq = &sc->sc_queue[i];
   8055 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8056 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8057 
   8058 		/*
   8059 		 * TODO
    8060 		 * Currently, use a constant value instead of AIM.
    8061 		 * Furthermore, the interrupt interval of multiqueue, which
    8062 		 * uses polling mode, is shorter than the default value.
    8063 		 * More tuning and AIM are required.
   8064 		 */
   8065 		if (wm_is_using_multiqueue(sc))
   8066 			wmq->wmq_itr = 50;
   8067 		else
   8068 			wmq->wmq_itr = sc->sc_itr_init;
   8069 		wmq->wmq_set_itr = true;
   8070 
   8071 		mutex_enter(txq->txq_lock);
   8072 		wm_init_tx_queue(sc, wmq, txq);
   8073 		mutex_exit(txq->txq_lock);
   8074 
   8075 		mutex_enter(rxq->rxq_lock);
   8076 		error = wm_init_rx_queue(sc, wmq, rxq);
   8077 		mutex_exit(rxq->rxq_lock);
   8078 		if (error)
   8079 			break;
   8080 	}
   8081 
   8082 	return error;
   8083 }
   8084 
   8085 /*
   8086  * wm_tx_offload:
   8087  *
   8088  *	Set up TCP/IP checksumming parameters for the
   8089  *	specified packet.
   8090  */
   8091 static void
   8092 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8093     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8094 {
   8095 	struct mbuf *m0 = txs->txs_mbuf;
   8096 	struct livengood_tcpip_ctxdesc *t;
   8097 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8098 	uint32_t ipcse;
   8099 	struct ether_header *eh;
   8100 	int offset, iphl;
   8101 	uint8_t fields;
   8102 
   8103 	/*
   8104 	 * XXX It would be nice if the mbuf pkthdr had offset
   8105 	 * fields for the protocol headers.
   8106 	 */
   8107 
   8108 	eh = mtod(m0, struct ether_header *);
   8109 	switch (htons(eh->ether_type)) {
   8110 	case ETHERTYPE_IP:
   8111 	case ETHERTYPE_IPV6:
   8112 		offset = ETHER_HDR_LEN;
   8113 		break;
   8114 
   8115 	case ETHERTYPE_VLAN:
   8116 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8117 		break;
   8118 
   8119 	default:
   8120 		/* Don't support this protocol or encapsulation. */
   8121 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8122 		txq->txq_last_hw_ipcs = 0;
   8123 		txq->txq_last_hw_tucs = 0;
   8124 		*fieldsp = 0;
   8125 		*cmdp = 0;
   8126 		return;
   8127 	}
   8128 
   8129 	if ((m0->m_pkthdr.csum_flags &
   8130 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8131 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8132 	} else
   8133 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8134 
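         	/*
         	 * IPCSE is the last byte of the IP checksum region,
         	 * inclusive; e.g. for an untagged IPv4 frame with a 20-byte
         	 * header this is byte 14 + 20 - 1 = 33.
         	 */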
   8135 	ipcse = offset + iphl - 1;
   8136 
   8137 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8138 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8139 	seg = 0;
   8140 	fields = 0;
   8141 
   8142 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8143 		int hlen = offset + iphl;
   8144 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8145 
   8146 		if (__predict_false(m0->m_len <
   8147 				    (hlen + sizeof(struct tcphdr)))) {
   8148 			/*
   8149 			 * TCP/IP headers are not in the first mbuf; we need
   8150 			 * to do this the slow and painful way. Let's just
   8151 			 * hope this doesn't happen very often.
   8152 			 */
   8153 			struct tcphdr th;
   8154 
   8155 			WM_Q_EVCNT_INCR(txq, tsopain);
   8156 
   8157 			m_copydata(m0, hlen, sizeof(th), &th);
   8158 			if (v4) {
   8159 				struct ip ip;
   8160 
   8161 				m_copydata(m0, offset, sizeof(ip), &ip);
   8162 				ip.ip_len = 0;
   8163 				m_copyback(m0,
   8164 				    offset + offsetof(struct ip, ip_len),
   8165 				    sizeof(ip.ip_len), &ip.ip_len);
   8166 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8167 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8168 			} else {
   8169 				struct ip6_hdr ip6;
   8170 
   8171 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8172 				ip6.ip6_plen = 0;
   8173 				m_copyback(m0,
   8174 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8175 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8176 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8177 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8178 			}
   8179 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8180 			    sizeof(th.th_sum), &th.th_sum);
   8181 
   8182 			hlen += th.th_off << 2;
   8183 		} else {
   8184 			/*
   8185 			 * TCP/IP headers are in the first mbuf; we can do
   8186 			 * this the easy way.
   8187 			 */
   8188 			struct tcphdr *th;
   8189 
   8190 			if (v4) {
   8191 				struct ip *ip =
   8192 				    (void *)(mtod(m0, char *) + offset);
   8193 				th = (void *)(mtod(m0, char *) + hlen);
   8194 
   8195 				ip->ip_len = 0;
   8196 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8197 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8198 			} else {
   8199 				struct ip6_hdr *ip6 =
   8200 				    (void *)(mtod(m0, char *) + offset);
   8201 				th = (void *)(mtod(m0, char *) + hlen);
   8202 
   8203 				ip6->ip6_plen = 0;
   8204 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8205 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8206 			}
   8207 			hlen += th->th_off << 2;
   8208 		}
   8209 
   8210 		if (v4) {
   8211 			WM_Q_EVCNT_INCR(txq, tso);
   8212 			cmdlen |= WTX_TCPIP_CMD_IP;
   8213 		} else {
   8214 			WM_Q_EVCNT_INCR(txq, tso6);
   8215 			ipcse = 0;
   8216 		}
   8217 		cmd |= WTX_TCPIP_CMD_TSE;
   8218 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8219 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8220 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8221 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8222 	}
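         	/*
         	 * Worked TSO example (assumed values): a 7314-byte IPv4/TCP
         	 * packet with 54 bytes of headers and an MSS of 1460 sets a
         	 * payload length of 7260 in cmdlen, and HDRLEN 54 with MSS
         	 * 1460 in seg; the hardware then emits five on-wire segments.
         	 */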
   8223 
   8224 	/*
   8225 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8226 	 * offload feature, if we load the context descriptor, we
   8227 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8228 	 */
   8229 
   8230 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8231 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8232 	    WTX_TCPIP_IPCSE(ipcse);
   8233 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8234 		WM_Q_EVCNT_INCR(txq, ipsum);
   8235 		fields |= WTX_IXSM;
   8236 	}
   8237 
   8238 	offset += iphl;
   8239 
   8240 	if (m0->m_pkthdr.csum_flags &
   8241 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8242 		WM_Q_EVCNT_INCR(txq, tusum);
   8243 		fields |= WTX_TXSM;
   8244 		tucs = WTX_TCPIP_TUCSS(offset) |
   8245 		    WTX_TCPIP_TUCSO(offset +
   8246 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8247 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8248 	} else if ((m0->m_pkthdr.csum_flags &
   8249 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8250 		WM_Q_EVCNT_INCR(txq, tusum6);
   8251 		fields |= WTX_TXSM;
   8252 		tucs = WTX_TCPIP_TUCSS(offset) |
   8253 		    WTX_TCPIP_TUCSO(offset +
   8254 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8255 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8256 	} else {
   8257 		/* Just initialize it to a valid TCP context. */
   8258 		tucs = WTX_TCPIP_TUCSS(offset) |
   8259 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8260 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8261 	}
   8262 
   8263 	*cmdp = cmd;
   8264 	*fieldsp = fields;
   8265 
   8266 	/*
   8267 	 * We don't have to write context descriptor for every packet
   8268 	 * except for 82574. For 82574, we must write context descriptor
   8269 	 * for every packet when we use two descriptor queues.
   8270 	 *
   8271 	 * The 82574L can only remember the *last* context used
    8272 	 * regardless of the queue it was used for.  We cannot reuse
   8273 	 * contexts on this hardware platform and must generate a new
   8274 	 * context every time.  82574L hardware spec, section 7.2.6,
   8275 	 * second note.
   8276 	 */
   8277 	if (sc->sc_nqueues < 2) {
   8278 		/*
    8279 		 * Setting up a new checksum offload context for every
    8280 		 * frame takes a lot of processing time for the hardware.
    8281 		 * This also reduces performance a lot for small sized
    8282 		 * frames, so avoid it if the driver can use a previously
    8283 		 * configured checksum offload context.
    8284 		 * For TSO, in theory we could reuse the same TSO context
    8285 		 * only if the frame has the same type (IP/TCP) and the
    8286 		 * same MSS. However, checking whether a frame has the
    8287 		 * same IP/TCP structure is hard, so just ignore that and
    8288 		 * always establish a new TSO context.
   8289 		 */
   8290 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8291 		    == 0) {
   8292 			if (txq->txq_last_hw_cmd == cmd &&
   8293 			    txq->txq_last_hw_fields == fields &&
   8294 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8295 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8296 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8297 				return;
   8298 			}
   8299 		}
   8300 
   8301 		txq->txq_last_hw_cmd = cmd;
   8302 		txq->txq_last_hw_fields = fields;
   8303 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8304 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8305 	}
   8306 
   8307 	/* Fill in the context descriptor. */
   8308 	t = (struct livengood_tcpip_ctxdesc *)
   8309 	    &txq->txq_descs[txq->txq_next];
   8310 	t->tcpip_ipcs = htole32(ipcs);
   8311 	t->tcpip_tucs = htole32(tucs);
   8312 	t->tcpip_cmdlen = htole32(cmdlen);
   8313 	t->tcpip_seg = htole32(seg);
   8314 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8315 
   8316 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8317 	txs->txs_ndesc++;
   8318 }
   8319 
   8320 static inline int
   8321 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8322 {
   8323 	struct wm_softc *sc = ifp->if_softc;
   8324 	u_int cpuid = cpu_index(curcpu());
   8325 
   8326 	/*
    8327 	 * Currently, a simple distribution strategy.
    8328 	 * TODO:
    8329 	 * Distribute by flowid (RSS hash value).
   8330 	 */
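         	/*
         	 * For illustration (assumed values): with ncpu = 8,
         	 * sc_affinity_offset = 2 and sc_nqueues = 4, a thread on
         	 * CPU 1 maps to ((1 + 8 - 2) % 8) % 4 = 7 % 4 = queue 3.
         	 */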
   8331 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8332 }
   8333 
   8334 static inline bool
   8335 wm_linkdown_discard(struct wm_txqueue *txq)
   8336 {
   8337 
   8338 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8339 		return true;
   8340 
   8341 	return false;
   8342 }
   8343 
   8344 /*
   8345  * wm_start:		[ifnet interface function]
   8346  *
   8347  *	Start packet transmission on the interface.
   8348  */
   8349 static void
   8350 wm_start(struct ifnet *ifp)
   8351 {
   8352 	struct wm_softc *sc = ifp->if_softc;
   8353 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8354 
   8355 	KASSERT(if_is_mpsafe(ifp));
   8356 	/*
   8357 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8358 	 */
   8359 
   8360 	mutex_enter(txq->txq_lock);
   8361 	if (!txq->txq_stopping)
   8362 		wm_start_locked(ifp);
   8363 	mutex_exit(txq->txq_lock);
   8364 }
   8365 
   8366 static void
   8367 wm_start_locked(struct ifnet *ifp)
   8368 {
   8369 	struct wm_softc *sc = ifp->if_softc;
   8370 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8371 
   8372 	wm_send_common_locked(ifp, txq, false);
   8373 }
   8374 
   8375 static int
   8376 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8377 {
   8378 	int qid;
   8379 	struct wm_softc *sc = ifp->if_softc;
   8380 	struct wm_txqueue *txq;
   8381 
   8382 	qid = wm_select_txqueue(ifp, m);
   8383 	txq = &sc->sc_queue[qid].wmq_txq;
   8384 
   8385 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8386 		m_freem(m);
   8387 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8388 		return ENOBUFS;
   8389 	}
   8390 
   8391 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8392 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8393 	if (m->m_flags & M_MCAST)
   8394 		if_statinc_ref(nsr, if_omcasts);
   8395 	IF_STAT_PUTREF(ifp);
   8396 
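         	/*
         	 * As with wm_nq_transmit() below, failing to take the lock
         	 * here is harmless: the enqueued packet is picked up later
         	 * by the deferred start path.
         	 */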
   8397 	if (mutex_tryenter(txq->txq_lock)) {
   8398 		if (!txq->txq_stopping)
   8399 			wm_transmit_locked(ifp, txq);
   8400 		mutex_exit(txq->txq_lock);
   8401 	}
   8402 
   8403 	return 0;
   8404 }
   8405 
   8406 static void
   8407 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8408 {
   8409 
   8410 	wm_send_common_locked(ifp, txq, true);
   8411 }
   8412 
   8413 static void
   8414 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8415     bool is_transmit)
   8416 {
   8417 	struct wm_softc *sc = ifp->if_softc;
   8418 	struct mbuf *m0;
   8419 	struct wm_txsoft *txs;
   8420 	bus_dmamap_t dmamap;
   8421 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8422 	bus_addr_t curaddr;
   8423 	bus_size_t seglen, curlen;
   8424 	uint32_t cksumcmd;
   8425 	uint8_t cksumfields;
   8426 	bool remap = true;
   8427 
   8428 	KASSERT(mutex_owned(txq->txq_lock));
   8429 	KASSERT(!txq->txq_stopping);
   8430 
   8431 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8432 		return;
   8433 
   8434 	if (__predict_false(wm_linkdown_discard(txq))) {
   8435 		do {
   8436 			if (is_transmit)
   8437 				m0 = pcq_get(txq->txq_interq);
   8438 			else
   8439 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8440 			/*
    8441 			 * Increment the successful packet counter even when
    8442 			 * the packet is discarded because the PHY link is down.
   8443 			 */
   8444 			if (m0 != NULL) {
   8445 				if_statinc(ifp, if_opackets);
   8446 				m_freem(m0);
   8447 			}
   8448 		} while (m0 != NULL);
   8449 		return;
   8450 	}
   8451 
   8452 	/* Remember the previous number of free descriptors. */
   8453 	ofree = txq->txq_free;
   8454 
   8455 	/*
   8456 	 * Loop through the send queue, setting up transmit descriptors
   8457 	 * until we drain the queue, or use up all available transmit
   8458 	 * descriptors.
   8459 	 */
   8460 	for (;;) {
   8461 		m0 = NULL;
   8462 
   8463 		/* Get a work queue entry. */
   8464 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8465 			wm_txeof(txq, UINT_MAX);
   8466 			if (txq->txq_sfree == 0) {
   8467 				DPRINTF(sc, WM_DEBUG_TX,
   8468 				    ("%s: TX: no free job descriptors\n",
   8469 					device_xname(sc->sc_dev)));
   8470 				WM_Q_EVCNT_INCR(txq, txsstall);
   8471 				break;
   8472 			}
   8473 		}
   8474 
   8475 		/* Grab a packet off the queue. */
   8476 		if (is_transmit)
   8477 			m0 = pcq_get(txq->txq_interq);
   8478 		else
   8479 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8480 		if (m0 == NULL)
   8481 			break;
   8482 
   8483 		DPRINTF(sc, WM_DEBUG_TX,
   8484 		    ("%s: TX: have packet to transmit: %p\n",
   8485 			device_xname(sc->sc_dev), m0));
   8486 
   8487 		txs = &txq->txq_soft[txq->txq_snext];
   8488 		dmamap = txs->txs_dmamap;
   8489 
   8490 		use_tso = (m0->m_pkthdr.csum_flags &
   8491 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8492 
   8493 		/*
   8494 		 * So says the Linux driver:
   8495 		 * The controller does a simple calculation to make sure
   8496 		 * there is enough room in the FIFO before initiating the
   8497 		 * DMA for each buffer. The calc is:
   8498 		 *	4 = ceil(buffer len / MSS)
   8499 		 * To make sure we don't overrun the FIFO, adjust the max
   8500 		 * buffer len if the MSS drops.
   8501 		 */
   8502 		dmamap->dm_maxsegsz =
   8503 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8504 		    ? m0->m_pkthdr.segsz << 2
   8505 		    : WTX_MAX_LEN;
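         		/*
         		 * E.g. (illustrative values) with TSO and an MSS of
         		 * 1460, dm_maxsegsz is clamped to 4 * 1460 = 5840
         		 * bytes, so a single buffer never spans more than
         		 * four MSS-sized chunks.
         		 */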
   8506 
   8507 		/*
   8508 		 * Load the DMA map.  If this fails, the packet either
   8509 		 * didn't fit in the allotted number of segments, or we
   8510 		 * were short on resources.  For the too-many-segments
   8511 		 * case, we simply report an error and drop the packet,
   8512 		 * since we can't sanely copy a jumbo packet to a single
   8513 		 * buffer.
   8514 		 */
   8515 retry:
   8516 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8517 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8518 		if (__predict_false(error)) {
   8519 			if (error == EFBIG) {
   8520 				if (remap == true) {
   8521 					struct mbuf *m;
   8522 
   8523 					remap = false;
   8524 					m = m_defrag(m0, M_NOWAIT);
   8525 					if (m != NULL) {
   8526 						WM_Q_EVCNT_INCR(txq, defrag);
   8527 						m0 = m;
   8528 						goto retry;
   8529 					}
   8530 				}
   8531 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8532 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8533 				    "DMA segments, dropping...\n",
   8534 				    device_xname(sc->sc_dev));
   8535 				wm_dump_mbuf_chain(sc, m0);
   8536 				m_freem(m0);
   8537 				continue;
   8538 			}
   8539 			/* Short on resources, just stop for now. */
   8540 			DPRINTF(sc, WM_DEBUG_TX,
   8541 			    ("%s: TX: dmamap load failed: %d\n",
   8542 				device_xname(sc->sc_dev), error));
   8543 			break;
   8544 		}
   8545 
   8546 		segs_needed = dmamap->dm_nsegs;
   8547 		if (use_tso) {
   8548 			/* For sentinel descriptor; see below. */
   8549 			segs_needed++;
   8550 		}
   8551 
   8552 		/*
   8553 		 * Ensure we have enough descriptors free to describe
   8554 		 * the packet. Note, we always reserve one descriptor
   8555 		 * at the end of the ring due to the semantics of the
   8556 		 * TDT register, plus one more in the event we need
   8557 		 * to load offload context.
   8558 		 */
   8559 		if (segs_needed > txq->txq_free - 2) {
   8560 			/*
   8561 			 * Not enough free descriptors to transmit this
   8562 			 * packet.  We haven't committed anything yet,
   8563 			 * so just unload the DMA map, put the packet
    8564 			 * back on the queue, and punt. Notify the upper
   8565 			 * layer that there are no more slots left.
   8566 			 */
   8567 			DPRINTF(sc, WM_DEBUG_TX,
   8568 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8569 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8570 				segs_needed, txq->txq_free - 1));
   8571 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8572 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8573 			WM_Q_EVCNT_INCR(txq, txdstall);
   8574 			break;
   8575 		}
   8576 
   8577 		/*
   8578 		 * Check for 82547 Tx FIFO bug. We need to do this
   8579 		 * once we know we can transmit the packet, since we
   8580 		 * do some internal FIFO space accounting here.
   8581 		 */
   8582 		if (sc->sc_type == WM_T_82547 &&
   8583 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8584 			DPRINTF(sc, WM_DEBUG_TX,
   8585 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8586 				device_xname(sc->sc_dev)));
   8587 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8588 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8589 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8590 			break;
   8591 		}
   8592 
   8593 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8594 
   8595 		DPRINTF(sc, WM_DEBUG_TX,
   8596 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8597 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8598 
   8599 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8600 
   8601 		/*
   8602 		 * Store a pointer to the packet so that we can free it
   8603 		 * later.
   8604 		 *
    8605 		 * Initially, we consider the number of descriptors the
    8606 		 * packet uses to be the number of DMA segments.  This may
    8607 		 * be incremented by 1 if we do checksum offload (a
    8608 		 * descriptor is used to set the checksum context).
   8609 		 */
   8610 		txs->txs_mbuf = m0;
   8611 		txs->txs_firstdesc = txq->txq_next;
   8612 		txs->txs_ndesc = segs_needed;
   8613 
   8614 		/* Set up offload parameters for this packet. */
   8615 		if (m0->m_pkthdr.csum_flags &
   8616 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8617 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8618 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8619 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8620 		} else {
   8621 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8622 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8623 			cksumcmd = 0;
   8624 			cksumfields = 0;
   8625 		}
   8626 
   8627 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8628 
   8629 		/* Sync the DMA map. */
   8630 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8631 		    BUS_DMASYNC_PREWRITE);
   8632 
   8633 		/* Initialize the transmit descriptor. */
   8634 		for (nexttx = txq->txq_next, seg = 0;
   8635 		     seg < dmamap->dm_nsegs; seg++) {
   8636 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8637 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8638 			     seglen != 0;
   8639 			     curaddr += curlen, seglen -= curlen,
   8640 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8641 				curlen = seglen;
   8642 
   8643 				/*
   8644 				 * So says the Linux driver:
   8645 				 * Work around for premature descriptor
   8646 				 * write-backs in TSO mode.  Append a
   8647 				 * 4-byte sentinel descriptor.
   8648 				 */
   8649 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8650 				    curlen > 8)
   8651 					curlen -= 4;
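         				/*
         				 * The 4 bytes trimmed here are emitted
         				 * by the next loop iteration as a
         				 * separate sentinel descriptor, e.g. a
         				 * final 1514-byte segment becomes
         				 * 1510 + 4.
         				 */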
   8652 
   8653 				wm_set_dma_addr(
   8654 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8655 				txq->txq_descs[nexttx].wtx_cmdlen
   8656 				    = htole32(cksumcmd | curlen);
   8657 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8658 				    = 0;
   8659 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8660 				    = cksumfields;
   8661 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8662 				lasttx = nexttx;
   8663 
   8664 				DPRINTF(sc, WM_DEBUG_TX,
   8665 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8666 					"len %#04zx\n",
   8667 					device_xname(sc->sc_dev), nexttx,
   8668 					(uint64_t)curaddr, curlen));
   8669 			}
   8670 		}
   8671 
   8672 		KASSERT(lasttx != -1);
   8673 
   8674 		/*
   8675 		 * Set up the command byte on the last descriptor of
   8676 		 * the packet. If we're in the interrupt delay window,
   8677 		 * delay the interrupt.
   8678 		 */
   8679 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8680 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8681 
   8682 		/*
   8683 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8684 		 * up the descriptor to encapsulate the packet for us.
   8685 		 *
   8686 		 * This is only valid on the last descriptor of the packet.
   8687 		 */
   8688 		if (vlan_has_tag(m0)) {
   8689 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8690 			    htole32(WTX_CMD_VLE);
   8691 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8692 			    = htole16(vlan_get_tag(m0));
   8693 		}
   8694 
   8695 		txs->txs_lastdesc = lasttx;
   8696 
   8697 		DPRINTF(sc, WM_DEBUG_TX,
   8698 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8699 			device_xname(sc->sc_dev),
   8700 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8701 
   8702 		/* Sync the descriptors we're using. */
   8703 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8704 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8705 
   8706 		/* Give the packet to the chip. */
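         		/*
         		 * Advancing TDT to nexttx hands every descriptor
         		 * before it to the hardware; the device processes up
         		 * to, but not including, the tail.
         		 */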
   8707 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8708 
   8709 		DPRINTF(sc, WM_DEBUG_TX,
   8710 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8711 
   8712 		DPRINTF(sc, WM_DEBUG_TX,
   8713 		    ("%s: TX: finished transmitting packet, job %d\n",
   8714 			device_xname(sc->sc_dev), txq->txq_snext));
   8715 
   8716 		/* Advance the tx pointer. */
   8717 		txq->txq_free -= txs->txs_ndesc;
   8718 		txq->txq_next = nexttx;
   8719 
   8720 		txq->txq_sfree--;
   8721 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8722 
   8723 		/* Pass the packet to any BPF listeners. */
   8724 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8725 	}
   8726 
   8727 	if (m0 != NULL) {
   8728 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8729 		WM_Q_EVCNT_INCR(txq, descdrop);
   8730 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8731 			__func__));
   8732 		m_freem(m0);
   8733 	}
   8734 
   8735 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8736 		/* No more slots; notify upper layer. */
   8737 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8738 	}
   8739 
   8740 	if (txq->txq_free != ofree) {
   8741 		/* Set a watchdog timer in case the chip flakes out. */
   8742 		txq->txq_lastsent = time_uptime;
   8743 		txq->txq_sending = true;
   8744 	}
   8745 }
   8746 
   8747 /*
   8748  * wm_nq_tx_offload:
   8749  *
   8750  *	Set up TCP/IP checksumming parameters for the
   8751  *	specified packet, for NEWQUEUE devices
   8752  */
   8753 static void
   8754 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8755     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8756 {
   8757 	struct mbuf *m0 = txs->txs_mbuf;
   8758 	uint32_t vl_len, mssidx, cmdc;
   8759 	struct ether_header *eh;
   8760 	int offset, iphl;
   8761 
   8762 	/*
   8763 	 * XXX It would be nice if the mbuf pkthdr had offset
   8764 	 * fields for the protocol headers.
   8765 	 */
   8766 	*cmdlenp = 0;
   8767 	*fieldsp = 0;
   8768 
   8769 	eh = mtod(m0, struct ether_header *);
   8770 	switch (htons(eh->ether_type)) {
   8771 	case ETHERTYPE_IP:
   8772 	case ETHERTYPE_IPV6:
   8773 		offset = ETHER_HDR_LEN;
   8774 		break;
   8775 
   8776 	case ETHERTYPE_VLAN:
   8777 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8778 		break;
   8779 
   8780 	default:
   8781 		/* Don't support this protocol or encapsulation. */
   8782 		*do_csum = false;
   8783 		return;
   8784 	}
   8785 	*do_csum = true;
   8786 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8787 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8788 
   8789 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8790 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8791 
   8792 	if ((m0->m_pkthdr.csum_flags &
   8793 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8794 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8795 	} else {
   8796 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8797 	}
   8798 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8799 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
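         	/*
         	 * For example (typical values): an untagged Ethernet frame
         	 * with a minimal IPv4 header packs MACLEN = 14 and
         	 * IPLEN = 20 into vl_len at their respective shifts.
         	 */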
   8800 
   8801 	if (vlan_has_tag(m0)) {
   8802 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8803 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8804 		*cmdlenp |= NQTX_CMD_VLE;
   8805 	}
   8806 
   8807 	mssidx = 0;
   8808 
   8809 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8810 		int hlen = offset + iphl;
   8811 		int tcp_hlen;
   8812 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8813 
   8814 		if (__predict_false(m0->m_len <
   8815 				    (hlen + sizeof(struct tcphdr)))) {
   8816 			/*
   8817 			 * TCP/IP headers are not in the first mbuf; we need
   8818 			 * to do this the slow and painful way. Let's just
   8819 			 * hope this doesn't happen very often.
   8820 			 */
   8821 			struct tcphdr th;
   8822 
   8823 			WM_Q_EVCNT_INCR(txq, tsopain);
   8824 
   8825 			m_copydata(m0, hlen, sizeof(th), &th);
   8826 			if (v4) {
   8827 				struct ip ip;
   8828 
   8829 				m_copydata(m0, offset, sizeof(ip), &ip);
   8830 				ip.ip_len = 0;
   8831 				m_copyback(m0,
   8832 				    offset + offsetof(struct ip, ip_len),
   8833 				    sizeof(ip.ip_len), &ip.ip_len);
   8834 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8835 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8836 			} else {
   8837 				struct ip6_hdr ip6;
   8838 
   8839 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8840 				ip6.ip6_plen = 0;
   8841 				m_copyback(m0,
   8842 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8843 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8844 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8845 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8846 			}
   8847 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8848 			    sizeof(th.th_sum), &th.th_sum);
   8849 
   8850 			tcp_hlen = th.th_off << 2;
   8851 		} else {
   8852 			/*
   8853 			 * TCP/IP headers are in the first mbuf; we can do
   8854 			 * this the easy way.
   8855 			 */
   8856 			struct tcphdr *th;
   8857 
   8858 			if (v4) {
   8859 				struct ip *ip =
   8860 				    (void *)(mtod(m0, char *) + offset);
   8861 				th = (void *)(mtod(m0, char *) + hlen);
   8862 
   8863 				ip->ip_len = 0;
   8864 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8865 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8866 			} else {
   8867 				struct ip6_hdr *ip6 =
   8868 				    (void *)(mtod(m0, char *) + offset);
   8869 				th = (void *)(mtod(m0, char *) + hlen);
   8870 
   8871 				ip6->ip6_plen = 0;
   8872 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8873 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8874 			}
   8875 			tcp_hlen = th->th_off << 2;
   8876 		}
   8877 		hlen += tcp_hlen;
   8878 		*cmdlenp |= NQTX_CMD_TSE;
   8879 
   8880 		if (v4) {
   8881 			WM_Q_EVCNT_INCR(txq, tso);
   8882 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8883 		} else {
   8884 			WM_Q_EVCNT_INCR(txq, tso6);
   8885 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8886 		}
   8887 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8888 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8889 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8890 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8891 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8892 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8893 	} else {
   8894 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8895 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8896 	}
   8897 
   8898 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8899 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8900 		cmdc |= NQTXC_CMD_IP4;
   8901 	}
   8902 
   8903 	if (m0->m_pkthdr.csum_flags &
   8904 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8905 		WM_Q_EVCNT_INCR(txq, tusum);
   8906 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8907 			cmdc |= NQTXC_CMD_TCP;
   8908 		else
   8909 			cmdc |= NQTXC_CMD_UDP;
   8910 
   8911 		cmdc |= NQTXC_CMD_IP4;
   8912 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8913 	}
   8914 	if (m0->m_pkthdr.csum_flags &
   8915 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8916 		WM_Q_EVCNT_INCR(txq, tusum6);
   8917 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8918 			cmdc |= NQTXC_CMD_TCP;
   8919 		else
   8920 			cmdc |= NQTXC_CMD_UDP;
   8921 
   8922 		cmdc |= NQTXC_CMD_IP6;
   8923 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8924 	}
   8925 
   8926 	/*
    8927 	 * We don't have to write a context descriptor for every packet
    8928 	 * on NEWQUEUE controllers, that is, 82575, 82576, 82580, I350,
    8929 	 * I354, I210 and I211. It is enough to write one per Tx queue
    8930 	 * for these controllers.
    8931 	 * Writing a context descriptor for every packet is overhead,
    8932 	 * but it does not cause problems.
   8933 	 */
   8934 	/* Fill in the context descriptor. */
   8935 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   8936 	    htole32(vl_len);
   8937 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   8938 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   8939 	    htole32(cmdc);
   8940 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   8941 	    htole32(mssidx);
   8942 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8943 	DPRINTF(sc, WM_DEBUG_TX,
   8944 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8945 		txq->txq_next, 0, vl_len));
   8946 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8947 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8948 	txs->txs_ndesc++;
   8949 }
   8950 
   8951 /*
   8952  * wm_nq_start:		[ifnet interface function]
   8953  *
   8954  *	Start packet transmission on the interface for NEWQUEUE devices
   8955  */
   8956 static void
   8957 wm_nq_start(struct ifnet *ifp)
   8958 {
   8959 	struct wm_softc *sc = ifp->if_softc;
   8960 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8961 
   8962 	KASSERT(if_is_mpsafe(ifp));
   8963 	/*
   8964 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8965 	 */
   8966 
   8967 	mutex_enter(txq->txq_lock);
   8968 	if (!txq->txq_stopping)
   8969 		wm_nq_start_locked(ifp);
   8970 	mutex_exit(txq->txq_lock);
   8971 }
   8972 
   8973 static void
   8974 wm_nq_start_locked(struct ifnet *ifp)
   8975 {
   8976 	struct wm_softc *sc = ifp->if_softc;
   8977 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8978 
   8979 	wm_nq_send_common_locked(ifp, txq, false);
   8980 }
   8981 
   8982 static int
   8983 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8984 {
   8985 	int qid;
   8986 	struct wm_softc *sc = ifp->if_softc;
   8987 	struct wm_txqueue *txq;
   8988 
   8989 	qid = wm_select_txqueue(ifp, m);
   8990 	txq = &sc->sc_queue[qid].wmq_txq;
   8991 
   8992 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8993 		m_freem(m);
   8994 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8995 		return ENOBUFS;
   8996 	}
   8997 
   8998 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8999 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9000 	if (m->m_flags & M_MCAST)
   9001 		if_statinc_ref(nsr, if_omcasts);
   9002 	IF_STAT_PUTREF(ifp);
   9003 
   9004 	/*
    9005 	 * There are two situations in which this mutex_tryenter() can
    9006 	 * fail at run time:
    9007 	 *     (1) contention with the interrupt handler
    9008 	 *         (wm_txrxintr_msix())
    9009 	 *     (2) contention with the deferred if_start softint
    9010 	 *         (wm_handle_queue())
    9011 	 * In either case, the last packet enqueued to txq->txq_interq
    9012 	 * is dequeued later by wm_deferred_start_locked(), so it does
    9013 	 * not get stuck.
   9014 	 */
   9015 	if (mutex_tryenter(txq->txq_lock)) {
   9016 		if (!txq->txq_stopping)
   9017 			wm_nq_transmit_locked(ifp, txq);
   9018 		mutex_exit(txq->txq_lock);
   9019 	}
   9020 
   9021 	return 0;
   9022 }
   9023 
   9024 static void
   9025 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9026 {
   9027 
   9028 	wm_nq_send_common_locked(ifp, txq, true);
   9029 }
   9030 
   9031 static void
   9032 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9033     bool is_transmit)
   9034 {
   9035 	struct wm_softc *sc = ifp->if_softc;
   9036 	struct mbuf *m0;
   9037 	struct wm_txsoft *txs;
   9038 	bus_dmamap_t dmamap;
   9039 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9040 	bool do_csum, sent;
   9041 	bool remap = true;
   9042 
   9043 	KASSERT(mutex_owned(txq->txq_lock));
   9044 	KASSERT(!txq->txq_stopping);
   9045 
   9046 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9047 		return;
   9048 
   9049 	if (__predict_false(wm_linkdown_discard(txq))) {
   9050 		do {
   9051 			if (is_transmit)
   9052 				m0 = pcq_get(txq->txq_interq);
   9053 			else
   9054 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9055 			/*
    9056 			 * Increment the successful packet counter even when
    9057 			 * the packet is discarded because the PHY link is down.
   9058 			 */
   9059 			if (m0 != NULL) {
   9060 				if_statinc(ifp, if_opackets);
   9061 				m_freem(m0);
   9062 			}
   9063 		} while (m0 != NULL);
   9064 		return;
   9065 	}
   9066 
   9067 	sent = false;
   9068 
   9069 	/*
   9070 	 * Loop through the send queue, setting up transmit descriptors
   9071 	 * until we drain the queue, or use up all available transmit
   9072 	 * descriptors.
   9073 	 */
   9074 	for (;;) {
   9075 		m0 = NULL;
   9076 
   9077 		/* Get a work queue entry. */
   9078 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9079 			wm_txeof(txq, UINT_MAX);
   9080 			if (txq->txq_sfree == 0) {
   9081 				DPRINTF(sc, WM_DEBUG_TX,
   9082 				    ("%s: TX: no free job descriptors\n",
   9083 					device_xname(sc->sc_dev)));
   9084 				WM_Q_EVCNT_INCR(txq, txsstall);
   9085 				break;
   9086 			}
   9087 		}
   9088 
   9089 		/* Grab a packet off the queue. */
   9090 		if (is_transmit)
   9091 			m0 = pcq_get(txq->txq_interq);
   9092 		else
   9093 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9094 		if (m0 == NULL)
   9095 			break;
   9096 
   9097 		DPRINTF(sc, WM_DEBUG_TX,
   9098 		    ("%s: TX: have packet to transmit: %p\n",
   9099 			device_xname(sc->sc_dev), m0));
   9100 
   9101 		txs = &txq->txq_soft[txq->txq_snext];
   9102 		dmamap = txs->txs_dmamap;
   9103 
   9104 		/*
   9105 		 * Load the DMA map.  If this fails, the packet either
   9106 		 * didn't fit in the allotted number of segments, or we
   9107 		 * were short on resources.  For the too-many-segments
   9108 		 * case, we simply report an error and drop the packet,
   9109 		 * since we can't sanely copy a jumbo packet to a single
   9110 		 * buffer.
   9111 		 */
   9112 retry:
   9113 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9114 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9115 		if (__predict_false(error)) {
   9116 			if (error == EFBIG) {
   9117 				if (remap == true) {
   9118 					struct mbuf *m;
   9119 
   9120 					remap = false;
   9121 					m = m_defrag(m0, M_NOWAIT);
   9122 					if (m != NULL) {
   9123 						WM_Q_EVCNT_INCR(txq, defrag);
   9124 						m0 = m;
   9125 						goto retry;
   9126 					}
   9127 				}
   9128 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9129 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9130 				    "DMA segments, dropping...\n",
   9131 				    device_xname(sc->sc_dev));
   9132 				wm_dump_mbuf_chain(sc, m0);
   9133 				m_freem(m0);
   9134 				continue;
   9135 			}
   9136 			/* Short on resources, just stop for now. */
   9137 			DPRINTF(sc, WM_DEBUG_TX,
   9138 			    ("%s: TX: dmamap load failed: %d\n",
   9139 				device_xname(sc->sc_dev), error));
   9140 			break;
   9141 		}
   9142 
   9143 		segs_needed = dmamap->dm_nsegs;
   9144 
   9145 		/*
   9146 		 * Ensure we have enough descriptors free to describe
   9147 		 * the packet. Note, we always reserve one descriptor
   9148 		 * at the end of the ring due to the semantics of the
   9149 		 * TDT register, plus one more in the event we need
   9150 		 * to load offload context.
   9151 		 */
   9152 		if (segs_needed > txq->txq_free - 2) {
   9153 			/*
   9154 			 * Not enough free descriptors to transmit this
   9155 			 * packet.  We haven't committed anything yet,
   9156 			 * so just unload the DMA map, put the packet
    9157 			 * back on the queue, and punt. Notify the upper
   9158 			 * layer that there are no more slots left.
   9159 			 */
   9160 			DPRINTF(sc, WM_DEBUG_TX,
   9161 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9162 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9163 				segs_needed, txq->txq_free - 1));
   9164 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9165 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9166 			WM_Q_EVCNT_INCR(txq, txdstall);
   9167 			break;
   9168 		}
   9169 
   9170 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9171 
   9172 		DPRINTF(sc, WM_DEBUG_TX,
   9173 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9174 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9175 
   9176 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9177 
   9178 		/*
   9179 		 * Store a pointer to the packet so that we can free it
   9180 		 * later.
   9181 		 *
    9182 		 * Initially, we consider the number of descriptors the
    9183 		 * packet uses to be the number of DMA segments.  This may
    9184 		 * be incremented by 1 if we do checksum offload (a
    9185 		 * descriptor is used to set the checksum context).
   9186 		 */
   9187 		txs->txs_mbuf = m0;
   9188 		txs->txs_firstdesc = txq->txq_next;
   9189 		txs->txs_ndesc = segs_needed;
   9190 
   9191 		/* Set up offload parameters for this packet. */
   9192 		uint32_t cmdlen, fields, dcmdlen;
   9193 		if (m0->m_pkthdr.csum_flags &
   9194 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9195 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9196 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9197 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9198 			    &do_csum);
   9199 		} else {
   9200 			do_csum = false;
   9201 			cmdlen = 0;
   9202 			fields = 0;
   9203 		}
   9204 
   9205 		/* Sync the DMA map. */
   9206 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9207 		    BUS_DMASYNC_PREWRITE);
   9208 
   9209 		/* Initialize the first transmit descriptor. */
   9210 		nexttx = txq->txq_next;
   9211 		if (!do_csum) {
   9212 			/* Set up a legacy descriptor */
   9213 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9214 			    dmamap->dm_segs[0].ds_addr);
   9215 			txq->txq_descs[nexttx].wtx_cmdlen =
   9216 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9217 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9218 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9219 			if (vlan_has_tag(m0)) {
   9220 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9221 				    htole32(WTX_CMD_VLE);
   9222 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9223 				    htole16(vlan_get_tag(m0));
   9224 			} else
    9225 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9226 
   9227 			dcmdlen = 0;
   9228 		} else {
   9229 			/* Set up an advanced data descriptor */
   9230 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9231 			    htole64(dmamap->dm_segs[0].ds_addr);
   9232 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9233 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9234 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9235 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9236 			    htole32(fields);
   9237 			DPRINTF(sc, WM_DEBUG_TX,
   9238 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9239 				device_xname(sc->sc_dev), nexttx,
   9240 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9241 			DPRINTF(sc, WM_DEBUG_TX,
   9242 			    ("\t 0x%08x%08x\n", fields,
   9243 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9244 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9245 		}
   9246 
   9247 		lasttx = nexttx;
   9248 		nexttx = WM_NEXTTX(txq, nexttx);
   9249 		/*
    9250 		 * Fill in the remaining descriptors. The legacy and advanced
    9251 		 * formats are laid out identically here, so one loop serves.
   9252 		 */
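         		/*
         		 * dcmdlen carries the descriptor-type bits chosen above
         		 * (zero for legacy, NQTX_DTYP_D | NQTX_CMD_DEXT for the
         		 * advanced format); it is OR'd into each segment below.
         		 */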
   9253 		for (seg = 1; seg < dmamap->dm_nsegs;
   9254 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9255 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9256 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9257 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9258 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9259 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9260 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9261 			lasttx = nexttx;
   9262 
   9263 			DPRINTF(sc, WM_DEBUG_TX,
   9264 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9265 				device_xname(sc->sc_dev), nexttx,
   9266 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9267 				dmamap->dm_segs[seg].ds_len));
   9268 		}
   9269 
   9270 		KASSERT(lasttx != -1);
   9271 
   9272 		/*
   9273 		 * Set up the command byte on the last descriptor of
   9274 		 * the packet. If we're in the interrupt delay window,
   9275 		 * delay the interrupt.
   9276 		 */
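         		/*
         		 * EOP and RS sit at the same bit positions in the legacy
         		 * and advanced formats (asserted below), so setting them
         		 * through the legacy view works for both.
         		 */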
   9277 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9278 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9279 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9280 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9281 
   9282 		txs->txs_lastdesc = lasttx;
   9283 
   9284 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9285 		    device_xname(sc->sc_dev),
   9286 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9287 
   9288 		/* Sync the descriptors we're using. */
   9289 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9290 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9291 
   9292 		/* Give the packet to the chip. */
   9293 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9294 		sent = true;
   9295 
   9296 		DPRINTF(sc, WM_DEBUG_TX,
   9297 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9298 
   9299 		DPRINTF(sc, WM_DEBUG_TX,
   9300 		    ("%s: TX: finished transmitting packet, job %d\n",
   9301 			device_xname(sc->sc_dev), txq->txq_snext));
   9302 
   9303 		/* Advance the tx pointer. */
   9304 		txq->txq_free -= txs->txs_ndesc;
   9305 		txq->txq_next = nexttx;
   9306 
   9307 		txq->txq_sfree--;
   9308 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9309 
   9310 		/* Pass the packet to any BPF listeners. */
   9311 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9312 	}
   9313 
   9314 	if (m0 != NULL) {
   9315 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9316 		WM_Q_EVCNT_INCR(txq, descdrop);
   9317 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9318 			__func__));
   9319 		m_freem(m0);
   9320 	}
   9321 
   9322 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9323 		/* No more slots; notify upper layer. */
   9324 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9325 	}
   9326 
   9327 	if (sent) {
   9328 		/* Set a watchdog timer in case the chip flakes out. */
   9329 		txq->txq_lastsent = time_uptime;
   9330 		txq->txq_sending = true;
   9331 	}
   9332 }
   9333 
   9334 static void
   9335 wm_deferred_start_locked(struct wm_txqueue *txq)
   9336 {
   9337 	struct wm_softc *sc = txq->txq_sc;
   9338 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9339 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9340 	int qid = wmq->wmq_id;
   9341 
   9342 	KASSERT(mutex_owned(txq->txq_lock));
   9343 	KASSERT(!txq->txq_stopping);
   9344 
   9345 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   9346 		/* XXX need for ALTQ or one CPU system */
   9347 		if (qid == 0)
   9348 			wm_nq_start_locked(ifp);
   9349 		wm_nq_transmit_locked(ifp, txq);
   9350 	} else {
   9351 		/* XXX need for ALTQ or one CPU system */
   9352 		if (qid == 0)
   9353 			wm_start_locked(ifp);
   9354 		wm_transmit_locked(ifp, txq);
   9355 	}
   9356 }
   9357 
   9358 /* Interrupt */
   9359 
   9360 /*
   9361  * wm_txeof:
   9362  *
   9363  *	Helper; handle transmit interrupts.
   9364  */
   9365 static bool
   9366 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9367 {
   9368 	struct wm_softc *sc = txq->txq_sc;
   9369 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9370 	struct wm_txsoft *txs;
   9371 	int count = 0;
   9372 	int i;
   9373 	uint8_t status;
   9374 	bool more = false;
   9375 
   9376 	KASSERT(mutex_owned(txq->txq_lock));
   9377 
   9378 	if (txq->txq_stopping)
   9379 		return false;
   9380 
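         	/* We are about to reclaim descriptors; clear the no-space flag. */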
   9381 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9382 
   9383 	/*
   9384 	 * Go through the Tx list and free mbufs for those
   9385 	 * frames which have been transmitted.
   9386 	 */
   9387 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9388 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9389 		txs = &txq->txq_soft[i];
   9390 
   9391 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9392 			device_xname(sc->sc_dev), i));
   9393 
   9394 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9395 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9396 
   9397 		status =
   9398 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9399 		if ((status & WTX_ST_DD) == 0) {
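         			/* Still owned by the chip; resync and recheck later. */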
   9400 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9401 			    BUS_DMASYNC_PREREAD);
   9402 			break;
   9403 		}
   9404 
   9405 		if (limit-- == 0) {
   9406 			more = true;
   9407 			DPRINTF(sc, WM_DEBUG_TX,
   9408 			    ("%s: TX: loop limited, job %d is not processed\n",
   9409 				device_xname(sc->sc_dev), i));
   9410 			break;
   9411 		}
   9412 
   9413 		count++;
   9414 		DPRINTF(sc, WM_DEBUG_TX,
   9415 		    ("%s: TX: job %d done: descs %d..%d\n",
   9416 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9417 		    txs->txs_lastdesc));
   9418 
   9419 		/*
   9420 		 * XXX We should probably be using the statistics
   9421 		 * XXX registers, but I don't know if they exist
   9422 		 * XXX on chips before the i82544.
   9423 		 */
   9424 
   9425 #ifdef WM_EVENT_COUNTERS
   9426 		if (status & WTX_ST_TU)
   9427 			WM_Q_EVCNT_INCR(txq, underrun);
   9428 #endif /* WM_EVENT_COUNTERS */
   9429 
   9430 		/*
    9431 		 * The documents for the 82574 and newer say the status field
    9432 		 * has neither an EC (Excessive Collision) bit nor an LC (Late
    9433 		 * Collision) bit (both reserved). Refer to the "PCIe GbE
    9434 		 * Controller Open Source Software Developer's Manual", the
    9435 		 * 82574 datasheet and newer.
    9436 		 *
    9437 		 * XXX I saw the LC bit set on I218 even though the media was
    9438 		 * full duplex; the bit may have another, undocumented meaning.
   9439 		 */
   9440 
   9441 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9442 		    && ((sc->sc_type < WM_T_82574)
   9443 			|| (sc->sc_type == WM_T_80003))) {
   9444 			if_statinc(ifp, if_oerrors);
   9445 			if (status & WTX_ST_LC)
   9446 				log(LOG_WARNING, "%s: late collision\n",
   9447 				    device_xname(sc->sc_dev));
   9448 			else if (status & WTX_ST_EC) {
   9449 				if_statadd(ifp, if_collisions,
   9450 				    TX_COLLISION_THRESHOLD + 1);
   9451 				log(LOG_WARNING, "%s: excessive collisions\n",
   9452 				    device_xname(sc->sc_dev));
   9453 			}
   9454 		} else
   9455 			if_statinc(ifp, if_opackets);
   9456 
   9457 		txq->txq_packets++;
   9458 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9459 
   9460 		txq->txq_free += txs->txs_ndesc;
   9461 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9462 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9463 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9464 		m_freem(txs->txs_mbuf);
   9465 		txs->txs_mbuf = NULL;
   9466 	}
   9467 
   9468 	/* Update the dirty transmit buffer pointer. */
   9469 	txq->txq_sdirty = i;
   9470 	DPRINTF(sc, WM_DEBUG_TX,
   9471 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9472 
   9473 	if (count != 0)
   9474 		rnd_add_uint32(&sc->rnd_source, count);
   9475 
   9476 	/*
   9477 	 * If there are no more pending transmissions, cancel the watchdog
   9478 	 * timer.
   9479 	 */
   9480 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9481 		txq->txq_sending = false;
   9482 
   9483 	return more;
   9484 }
   9485 
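         /*
          * Receive descriptor accessors.
          *
          * The 82574 uses extended descriptors, chips with the WM_F_NEWQUEUE
          * flag use the advanced ("nq") format, and all others use the legacy
          * format; these helpers hide the layout differences.
          */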
   9486 static inline uint32_t
   9487 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9488 {
   9489 	struct wm_softc *sc = rxq->rxq_sc;
   9490 
   9491 	if (sc->sc_type == WM_T_82574)
   9492 		return EXTRXC_STATUS(
   9493 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9494 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9495 		return NQRXC_STATUS(
   9496 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9497 	else
   9498 		return rxq->rxq_descs[idx].wrx_status;
   9499 }
   9500 
   9501 static inline uint32_t
   9502 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9503 {
   9504 	struct wm_softc *sc = rxq->rxq_sc;
   9505 
   9506 	if (sc->sc_type == WM_T_82574)
   9507 		return EXTRXC_ERROR(
   9508 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9509 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9510 		return NQRXC_ERROR(
   9511 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9512 	else
   9513 		return rxq->rxq_descs[idx].wrx_errors;
   9514 }
   9515 
   9516 static inline uint16_t
   9517 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9518 {
   9519 	struct wm_softc *sc = rxq->rxq_sc;
   9520 
   9521 	if (sc->sc_type == WM_T_82574)
   9522 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9523 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9524 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9525 	else
   9526 		return rxq->rxq_descs[idx].wrx_special;
   9527 }
   9528 
   9529 static inline int
   9530 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9531 {
   9532 	struct wm_softc *sc = rxq->rxq_sc;
   9533 
   9534 	if (sc->sc_type == WM_T_82574)
   9535 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9536 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9537 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9538 	else
   9539 		return rxq->rxq_descs[idx].wrx_len;
   9540 }
   9541 
   9542 #ifdef WM_DEBUG
   9543 static inline uint32_t
   9544 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9545 {
   9546 	struct wm_softc *sc = rxq->rxq_sc;
   9547 
   9548 	if (sc->sc_type == WM_T_82574)
   9549 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9550 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9551 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9552 	else
   9553 		return 0;
   9554 }
   9555 
   9556 static inline uint8_t
   9557 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9558 {
   9559 	struct wm_softc *sc = rxq->rxq_sc;
   9560 
   9561 	if (sc->sc_type == WM_T_82574)
   9562 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9563 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9564 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9565 	else
   9566 		return 0;
   9567 }
   9568 #endif /* WM_DEBUG */
   9569 
   9570 static inline bool
   9571 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9572     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9573 {
   9574 
   9575 	if (sc->sc_type == WM_T_82574)
   9576 		return (status & ext_bit) != 0;
   9577 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9578 		return (status & nq_bit) != 0;
   9579 	else
   9580 		return (status & legacy_bit) != 0;
   9581 }
   9582 
   9583 static inline bool
   9584 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9585     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9586 {
   9587 
   9588 	if (sc->sc_type == WM_T_82574)
   9589 		return (error & ext_bit) != 0;
   9590 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9591 		return (error & nq_bit) != 0;
   9592 	else
   9593 		return (error & legacy_bit) != 0;
   9594 }
   9595 
   9596 static inline bool
   9597 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9598 {
   9599 
   9600 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9601 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9602 		return true;
   9603 	else
   9604 		return false;
   9605 }
   9606 
   9607 static inline bool
   9608 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9609 {
   9610 	struct wm_softc *sc = rxq->rxq_sc;
   9611 
   9612 	/* XXX missing error bit for newqueue? */
   9613 	if (wm_rxdesc_is_set_error(sc, errors,
   9614 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9615 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9616 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9617 		NQRXC_ERROR_RXE)) {
   9618 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9619 		    EXTRXC_ERROR_SE, 0))
   9620 			log(LOG_WARNING, "%s: symbol error\n",
   9621 			    device_xname(sc->sc_dev));
   9622 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9623 		    EXTRXC_ERROR_SEQ, 0))
   9624 			log(LOG_WARNING, "%s: receive sequence error\n",
   9625 			    device_xname(sc->sc_dev));
   9626 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9627 		    EXTRXC_ERROR_CE, 0))
   9628 			log(LOG_WARNING, "%s: CRC error\n",
   9629 			    device_xname(sc->sc_dev));
   9630 		return true;
   9631 	}
   9632 
   9633 	return false;
   9634 }
   9635 
   9636 static inline bool
   9637 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9638 {
   9639 	struct wm_softc *sc = rxq->rxq_sc;
   9640 
   9641 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9642 		NQRXC_STATUS_DD)) {
   9643 		/* We have processed all of the receive descriptors. */
   9644 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9645 		return false;
   9646 	}
   9647 
   9648 	return true;
   9649 }
   9650 
   9651 static inline bool
   9652 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9653     uint16_t vlantag, struct mbuf *m)
   9654 {
   9655 
   9656 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9657 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9658 		vlan_set_tag(m, le16toh(vlantag));
   9659 	}
   9660 
   9661 	return true;
   9662 }
   9663 
   9664 static inline void
   9665 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9666     uint32_t errors, struct mbuf *m)
   9667 {
   9668 	struct wm_softc *sc = rxq->rxq_sc;
   9669 
   9670 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9671 		if (wm_rxdesc_is_set_status(sc, status,
   9672 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9673 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9674 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9675 			if (wm_rxdesc_is_set_error(sc, errors,
   9676 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9677 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9678 		}
   9679 		if (wm_rxdesc_is_set_status(sc, status,
   9680 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9681 			/*
   9682 			 * Note: we don't know if this was TCP or UDP,
   9683 			 * so we just set both bits, and expect the
   9684 			 * upper layers to deal.
   9685 			 */
   9686 			WM_Q_EVCNT_INCR(rxq, tusum);
   9687 			m->m_pkthdr.csum_flags |=
   9688 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9689 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9690 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9691 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9692 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9693 		}
   9694 	}
   9695 }
   9696 
   9697 /*
   9698  * wm_rxeof:
   9699  *
   9700  *	Helper; handle receive interrupts.
   9701  */
   9702 static bool
   9703 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9704 {
   9705 	struct wm_softc *sc = rxq->rxq_sc;
   9706 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9707 	struct wm_rxsoft *rxs;
   9708 	struct mbuf *m;
   9709 	int i, len;
   9710 	int count = 0;
   9711 	uint32_t status, errors;
   9712 	uint16_t vlantag;
   9713 	bool more = false;
   9714 
   9715 	KASSERT(mutex_owned(rxq->rxq_lock));
   9716 
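         	/*
         	 * Walk the ring from the last processed slot until we reach
         	 * a descriptor the hardware still owns or we hit the limit.
         	 */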
   9717 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9718 		rxs = &rxq->rxq_soft[i];
   9719 
   9720 		DPRINTF(sc, WM_DEBUG_RX,
   9721 		    ("%s: RX: checking descriptor %d\n",
   9722 			device_xname(sc->sc_dev), i));
   9723 		wm_cdrxsync(rxq, i,
   9724 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9725 
   9726 		status = wm_rxdesc_get_status(rxq, i);
   9727 		errors = wm_rxdesc_get_errors(rxq, i);
   9728 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9729 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9730 #ifdef WM_DEBUG
   9731 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9732 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9733 #endif
   9734 
   9735 		if (!wm_rxdesc_dd(rxq, i, status))
   9736 			break;
   9737 
   9738 		if (limit-- == 0) {
   9739 			more = true;
   9740 			DPRINTF(sc, WM_DEBUG_RX,
   9741 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9742 				device_xname(sc->sc_dev), i));
   9743 			break;
   9744 		}
   9745 
   9746 		count++;
   9747 		if (__predict_false(rxq->rxq_discard)) {
   9748 			DPRINTF(sc, WM_DEBUG_RX,
   9749 			    ("%s: RX: discarding contents of descriptor %d\n",
   9750 				device_xname(sc->sc_dev), i));
   9751 			wm_init_rxdesc(rxq, i);
   9752 			if (wm_rxdesc_is_eop(rxq, status)) {
   9753 				/* Reset our state. */
   9754 				DPRINTF(sc, WM_DEBUG_RX,
   9755 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9756 					device_xname(sc->sc_dev)));
   9757 				rxq->rxq_discard = 0;
   9758 			}
   9759 			continue;
   9760 		}
   9761 
   9762 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9763 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9764 
   9765 		m = rxs->rxs_mbuf;
   9766 
   9767 		/*
   9768 		 * Add a new receive buffer to the ring, unless of
   9769 		 * course the length is zero. Treat the latter as a
   9770 		 * failed mapping.
   9771 		 */
   9772 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9773 			/*
   9774 			 * Failed, throw away what we've done so
   9775 			 * far, and discard the rest of the packet.
   9776 			 */
   9777 			if_statinc(ifp, if_ierrors);
   9778 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9779 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9780 			wm_init_rxdesc(rxq, i);
   9781 			if (!wm_rxdesc_is_eop(rxq, status))
   9782 				rxq->rxq_discard = 1;
   9783 			if (rxq->rxq_head != NULL)
   9784 				m_freem(rxq->rxq_head);
   9785 			WM_RXCHAIN_RESET(rxq);
   9786 			DPRINTF(sc, WM_DEBUG_RX,
   9787 			    ("%s: RX: Rx buffer allocation failed, "
   9788 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9789 				rxq->rxq_discard ? " (discard)" : ""));
   9790 			continue;
   9791 		}
   9792 
   9793 		m->m_len = len;
   9794 		rxq->rxq_len += len;
   9795 		DPRINTF(sc, WM_DEBUG_RX,
   9796 		    ("%s: RX: buffer at %p len %d\n",
   9797 			device_xname(sc->sc_dev), m->m_data, len));
   9798 
   9799 		/* If this is not the end of the packet, keep looking. */
   9800 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9801 			WM_RXCHAIN_LINK(rxq, m);
   9802 			DPRINTF(sc, WM_DEBUG_RX,
   9803 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9804 				device_xname(sc->sc_dev), rxq->rxq_len));
   9805 			continue;
   9806 		}
   9807 
   9808 		/*
    9809 		 * Okay, we have the entire packet now. The chip is
    9810 		 * configured to include the FCS (not all chips can be
    9811 		 * configured to strip it), so we normally need to trim it,
    9812 		 * except on the I35[04] and I21[01]: those chips have an
    9813 		 * erratum whereby RCTL_SECRC is always set, so the FCS is
    9814 		 * already stripped and we don't trim it. PCH2 and newer
    9815 		 * chips also don't include the FCS on jumbo frames, to work
    9816 		 * around an erratum. We may need to adjust the length of the
    9817 		 * previous mbuf in the chain if the current mbuf is too short.
   9818 		 */
   9819 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9820 			if (m->m_len < ETHER_CRC_LEN) {
   9821 				rxq->rxq_tail->m_len
   9822 				    -= (ETHER_CRC_LEN - m->m_len);
   9823 				m->m_len = 0;
   9824 			} else
   9825 				m->m_len -= ETHER_CRC_LEN;
   9826 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9827 		} else
   9828 			len = rxq->rxq_len;
   9829 
   9830 		WM_RXCHAIN_LINK(rxq, m);
   9831 
   9832 		*rxq->rxq_tailp = NULL;
   9833 		m = rxq->rxq_head;
   9834 
   9835 		WM_RXCHAIN_RESET(rxq);
   9836 
   9837 		DPRINTF(sc, WM_DEBUG_RX,
   9838 		    ("%s: RX: have entire packet, len -> %d\n",
   9839 			device_xname(sc->sc_dev), len));
   9840 
   9841 		/* If an error occurred, update stats and drop the packet. */
   9842 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9843 			m_freem(m);
   9844 			continue;
   9845 		}
   9846 
   9847 		/* No errors.  Receive the packet. */
   9848 		m_set_rcvif(m, ifp);
   9849 		m->m_pkthdr.len = len;
   9850 		/*
    9851 		 * TODO:
    9852 		 * We should save the rsshash and rsstype in this mbuf.
   9853 		 */
   9854 		DPRINTF(sc, WM_DEBUG_RX,
   9855 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9856 			device_xname(sc->sc_dev), rsstype, rsshash));
   9857 
   9858 		/*
   9859 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9860 		 * for us.  Associate the tag with the packet.
   9861 		 */
   9862 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9863 			continue;
   9864 
   9865 		/* Set up checksum info for this packet. */
   9866 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9867 
   9868 		rxq->rxq_packets++;
   9869 		rxq->rxq_bytes += len;
   9870 		/* Pass it on. */
   9871 		if_percpuq_enqueue(sc->sc_ipq, m);
   9872 
   9873 		if (rxq->rxq_stopping)
   9874 			break;
   9875 	}
   9876 	rxq->rxq_ptr = i;
   9877 
   9878 	if (count != 0)
   9879 		rnd_add_uint32(&sc->rnd_source, count);
   9880 
   9881 	DPRINTF(sc, WM_DEBUG_RX,
   9882 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9883 
   9884 	return more;
   9885 }
   9886 
   9887 /*
   9888  * wm_linkintr_gmii:
   9889  *
   9890  *	Helper; handle link interrupts for GMII.
   9891  */
   9892 static void
   9893 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9894 {
   9895 	device_t dev = sc->sc_dev;
   9896 	uint32_t status, reg;
   9897 	bool link;
   9898 	int rv;
   9899 
   9900 	KASSERT(mutex_owned(sc->sc_core_lock));
   9901 
   9902 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9903 		__func__));
   9904 
   9905 	if ((icr & ICR_LSC) == 0) {
   9906 		if (icr & ICR_RXSEQ)
   9907 			DPRINTF(sc, WM_DEBUG_LINK,
   9908 			    ("%s: LINK Receive sequence error\n",
   9909 				device_xname(dev)));
   9910 		return;
   9911 	}
   9912 
   9913 	/* Link status changed */
   9914 	status = CSR_READ(sc, WMREG_STATUS);
   9915 	link = status & STATUS_LU;
   9916 	if (link) {
   9917 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9918 			device_xname(dev),
   9919 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9920 		if (wm_phy_need_linkdown_discard(sc)) {
   9921 			DPRINTF(sc, WM_DEBUG_LINK,
   9922 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9923 				device_xname(dev)));
   9924 			wm_clear_linkdown_discard(sc);
   9925 		}
   9926 	} else {
   9927 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9928 			device_xname(dev)));
   9929 		if (wm_phy_need_linkdown_discard(sc)) {
   9930 			DPRINTF(sc, WM_DEBUG_LINK,
   9931 			    ("%s: linkintr: Set linkdown discard flag\n",
   9932 				device_xname(dev)));
   9933 			wm_set_linkdown_discard(sc);
   9934 		}
   9935 	}
   9936 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9937 		wm_gig_downshift_workaround_ich8lan(sc);
   9938 
   9939 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9940 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9941 
   9942 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9943 		device_xname(dev)));
   9944 	mii_pollstat(&sc->sc_mii);
   9945 	if (sc->sc_type == WM_T_82543) {
   9946 		int miistatus, active;
   9947 
   9948 		/*
   9949 		 * With 82543, we need to force speed and
   9950 		 * duplex on the MAC equal to what the PHY
   9951 		 * speed and duplex configuration is.
   9952 		 */
   9953 		miistatus = sc->sc_mii.mii_media_status;
   9954 
   9955 		if (miistatus & IFM_ACTIVE) {
   9956 			active = sc->sc_mii.mii_media_active;
   9957 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9958 			switch (IFM_SUBTYPE(active)) {
   9959 			case IFM_10_T:
   9960 				sc->sc_ctrl |= CTRL_SPEED_10;
   9961 				break;
   9962 			case IFM_100_TX:
   9963 				sc->sc_ctrl |= CTRL_SPEED_100;
   9964 				break;
   9965 			case IFM_1000_T:
   9966 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9967 				break;
   9968 			default:
   9969 				/*
   9970 				 * Fiber?
    9971 				 * Should not enter here.
   9972 				 */
   9973 				device_printf(dev, "unknown media (%x)\n",
   9974 				    active);
   9975 				break;
   9976 			}
   9977 			if (active & IFM_FDX)
   9978 				sc->sc_ctrl |= CTRL_FD;
   9979 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9980 		}
   9981 	} else if (sc->sc_type == WM_T_PCH) {
   9982 		wm_k1_gig_workaround_hv(sc,
   9983 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9984 	}
   9985 
   9986 	/*
   9987 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9988 	 * aggressive resulting in many collisions. To avoid this, increase
   9989 	 * the IPG and reduce Rx latency in the PHY.
   9990 	 */
   9991 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9992 	    && link) {
   9993 		uint32_t tipg_reg;
   9994 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9995 		bool fdx;
   9996 		uint16_t emi_addr, emi_val;
   9997 
   9998 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9999 		tipg_reg &= ~TIPG_IPGT_MASK;
   10000 		fdx = status & STATUS_FD;
   10001 
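          		/*
          		 * Pick the IPG transmit time (IPGT) and the PHY Rx
          		 * latency EMI value for this speed/duplex combination.
          		 */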
   10002 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10003 			tipg_reg |= 0xff;
   10004 			/* Reduce Rx latency in analog PHY */
   10005 			emi_val = 0;
   10006 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10007 		    fdx && speed != STATUS_SPEED_1000) {
   10008 			tipg_reg |= 0xc;
   10009 			emi_val = 1;
   10010 		} else {
   10011 			/* Roll back the default values */
   10012 			tipg_reg |= 0x08;
   10013 			emi_val = 1;
   10014 		}
   10015 
   10016 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10017 
   10018 		rv = sc->phy.acquire(sc);
   10019 		if (rv)
   10020 			return;
   10021 
   10022 		if (sc->sc_type == WM_T_PCH2)
   10023 			emi_addr = I82579_RX_CONFIG;
   10024 		else
   10025 			emi_addr = I217_RX_CONFIG;
   10026 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10027 
   10028 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10029 			uint16_t phy_reg;
   10030 
   10031 			sc->phy.readreg_locked(dev, 2,
   10032 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10033 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10034 			if (speed == STATUS_SPEED_100
   10035 			    || speed == STATUS_SPEED_10)
   10036 				phy_reg |= 0x3e8;
   10037 			else
   10038 				phy_reg |= 0xfa;
   10039 			sc->phy.writereg_locked(dev, 2,
   10040 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10041 
   10042 			if (speed == STATUS_SPEED_1000) {
   10043 				sc->phy.readreg_locked(dev, 2,
   10044 				    HV_PM_CTRL, &phy_reg);
   10045 
   10046 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10047 
   10048 				sc->phy.writereg_locked(dev, 2,
   10049 				    HV_PM_CTRL, phy_reg);
   10050 			}
   10051 		}
   10052 		sc->phy.release(sc);
   10053 
   10054 		if (rv)
   10055 			return;
   10056 
   10057 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10058 			uint16_t data, ptr_gap;
   10059 
   10060 			if (speed == STATUS_SPEED_1000) {
   10061 				rv = sc->phy.acquire(sc);
   10062 				if (rv)
   10063 					return;
   10064 
   10065 				rv = sc->phy.readreg_locked(dev, 2,
   10066 				    I82579_UNKNOWN1, &data);
   10067 				if (rv) {
   10068 					sc->phy.release(sc);
   10069 					return;
   10070 				}
   10071 
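          				/*
          				 * The gap field lives in bits 11:2;
          				 * raise it to at least 0x18.
          				 */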
   10072 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10073 				if (ptr_gap < 0x18) {
   10074 					data &= ~(0x3ff << 2);
   10075 					data |= (0x18 << 2);
   10076 					rv = sc->phy.writereg_locked(dev,
   10077 					    2, I82579_UNKNOWN1, data);
   10078 				}
   10079 				sc->phy.release(sc);
   10080 				if (rv)
   10081 					return;
   10082 			} else {
   10083 				rv = sc->phy.acquire(sc);
   10084 				if (rv)
   10085 					return;
   10086 
   10087 				rv = sc->phy.writereg_locked(dev, 2,
   10088 				    I82579_UNKNOWN1, 0xc023);
   10089 				sc->phy.release(sc);
   10090 				if (rv)
   10091 					return;
   10092 
   10093 			}
   10094 		}
   10095 	}
   10096 
   10097 	/*
    10098 	 * Work around the I217 packet loss issue:
    10099 	 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    10100 	 * on power up.
    10101 	 * Set the Beacon Duration for the I217 to 8 usec.
   10102 	 */
   10103 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10104 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10105 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10106 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10107 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10108 	}
   10109 
   10110 	/* Work-around I218 hang issue */
   10111 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10112 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10113 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10114 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10115 		wm_k1_workaround_lpt_lp(sc, link);
   10116 
   10117 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10118 		/*
   10119 		 * Set platform power management values for Latency
   10120 		 * Tolerance Reporting (LTR)
   10121 		 */
   10122 		wm_platform_pm_pch_lpt(sc,
   10123 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10124 	}
   10125 
   10126 	/* Clear link partner's EEE ability */
   10127 	sc->eee_lp_ability = 0;
   10128 
   10129 	/* FEXTNVM6 K1-off workaround */
   10130 	if (sc->sc_type == WM_T_PCH_SPT) {
   10131 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10132 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10133 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10134 		else
   10135 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10136 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10137 	}
   10138 
   10139 	if (!link)
   10140 		return;
   10141 
   10142 	switch (sc->sc_type) {
   10143 	case WM_T_PCH2:
   10144 		wm_k1_workaround_lv(sc);
   10145 		/* FALLTHROUGH */
   10146 	case WM_T_PCH:
   10147 		if (sc->sc_phytype == WMPHY_82578)
   10148 			wm_link_stall_workaround_hv(sc);
   10149 		break;
   10150 	default:
   10151 		break;
   10152 	}
   10153 
   10154 	/* Enable/Disable EEE after link up */
   10155 	if (sc->sc_phytype > WMPHY_82579)
   10156 		wm_set_eee_pchlan(sc);
   10157 }
   10158 
   10159 /*
   10160  * wm_linkintr_tbi:
   10161  *
   10162  *	Helper; handle link interrupts for TBI mode.
   10163  */
   10164 static void
   10165 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10166 {
   10167 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10168 	uint32_t status;
   10169 
   10170 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10171 		__func__));
   10172 
   10173 	status = CSR_READ(sc, WMREG_STATUS);
   10174 	if (icr & ICR_LSC) {
   10175 		wm_check_for_link(sc);
   10176 		if (status & STATUS_LU) {
   10177 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10178 				device_xname(sc->sc_dev),
   10179 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10180 			/*
   10181 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10182 			 * so we should update sc->sc_ctrl
   10183 			 */
   10184 
   10185 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10186 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10187 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10188 			if (status & STATUS_FD)
   10189 				sc->sc_tctl |=
   10190 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10191 			else
   10192 				sc->sc_tctl |=
   10193 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10194 			if (sc->sc_ctrl & CTRL_TFCE)
   10195 				sc->sc_fcrtl |= FCRTL_XONE;
   10196 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10197 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10198 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10199 			sc->sc_tbi_linkup = 1;
   10200 			if_link_state_change(ifp, LINK_STATE_UP);
   10201 		} else {
   10202 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10203 				device_xname(sc->sc_dev)));
   10204 			sc->sc_tbi_linkup = 0;
   10205 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10206 		}
   10207 		/* Update LED */
   10208 		wm_tbi_serdes_set_linkled(sc);
   10209 	} else if (icr & ICR_RXSEQ)
   10210 		DPRINTF(sc, WM_DEBUG_LINK,
   10211 		    ("%s: LINK: Receive sequence error\n",
   10212 			device_xname(sc->sc_dev)));
   10213 }
   10214 
   10215 /*
   10216  * wm_linkintr_serdes:
   10217  *
    10218  *	Helper; handle link interrupts for SERDES mode.
   10219  */
   10220 static void
   10221 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10222 {
   10223 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10224 	struct mii_data *mii = &sc->sc_mii;
   10225 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10226 	uint32_t pcs_adv, pcs_lpab, reg;
   10227 
   10228 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10229 		__func__));
   10230 
   10231 	if (icr & ICR_LSC) {
   10232 		/* Check PCS */
   10233 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10234 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10235 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10236 				device_xname(sc->sc_dev)));
   10237 			mii->mii_media_status |= IFM_ACTIVE;
   10238 			sc->sc_tbi_linkup = 1;
   10239 			if_link_state_change(ifp, LINK_STATE_UP);
   10240 		} else {
   10241 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10242 				device_xname(sc->sc_dev)));
   10243 			mii->mii_media_status |= IFM_NONE;
   10244 			sc->sc_tbi_linkup = 0;
   10245 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10246 			wm_tbi_serdes_set_linkled(sc);
   10247 			return;
   10248 		}
   10249 		mii->mii_media_active |= IFM_1000_SX;
   10250 		if ((reg & PCS_LSTS_FDX) != 0)
   10251 			mii->mii_media_active |= IFM_FDX;
   10252 		else
   10253 			mii->mii_media_active |= IFM_HDX;
   10254 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10255 			/* Check flow */
   10256 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10257 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10258 				DPRINTF(sc, WM_DEBUG_LINK,
   10259 				    ("XXX LINKOK but not ACOMP\n"));
   10260 				return;
   10261 			}
   10262 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10263 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10264 			DPRINTF(sc, WM_DEBUG_LINK,
   10265 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10266 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10267 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10268 				mii->mii_media_active |= IFM_FLOW
   10269 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10270 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10271 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10272 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10273 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10274 				mii->mii_media_active |= IFM_FLOW
   10275 				    | IFM_ETH_TXPAUSE;
   10276 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10277 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10278 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10279 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10280 				mii->mii_media_active |= IFM_FLOW
   10281 				    | IFM_ETH_RXPAUSE;
   10282 		}
   10283 		/* Update LED */
   10284 		wm_tbi_serdes_set_linkled(sc);
   10285 	} else
   10286 		DPRINTF(sc, WM_DEBUG_LINK,
   10287 		    ("%s: LINK: Receive sequence error\n",
   10288 		    device_xname(sc->sc_dev)));
   10289 }
   10290 
   10291 /*
   10292  * wm_linkintr:
   10293  *
   10294  *	Helper; handle link interrupts.
   10295  */
   10296 static void
   10297 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10298 {
   10299 
   10300 	KASSERT(mutex_owned(sc->sc_core_lock));
   10301 
   10302 	if (sc->sc_flags & WM_F_HAS_MII)
   10303 		wm_linkintr_gmii(sc, icr);
   10304 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10305 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10306 		wm_linkintr_serdes(sc, icr);
   10307 	else
   10308 		wm_linkintr_tbi(sc, icr);
   10309 }
   10310 
   10311 
   10312 static inline void
   10313 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10314 {
   10315 
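          	/*
          	 * Defer Tx/Rx processing to workqueue or softint context;
          	 * wmq_wq_enqueued prevents enqueueing the same work twice.
          	 */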
   10316 	if (wmq->wmq_txrx_use_workqueue) {
   10317 		if (!wmq->wmq_wq_enqueued) {
   10318 			wmq->wmq_wq_enqueued = true;
   10319 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10320 			    curcpu());
   10321 		}
   10322 	} else
   10323 		softint_schedule(wmq->wmq_si);
   10324 }
   10325 
   10326 static inline void
   10327 wm_legacy_intr_disable(struct wm_softc *sc)
   10328 {
   10329 
   10330 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10331 }
   10332 
   10333 static inline void
   10334 wm_legacy_intr_enable(struct wm_softc *sc)
   10335 {
   10336 
   10337 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10338 }
   10339 
   10340 /*
   10341  * wm_intr_legacy:
   10342  *
   10343  *	Interrupt service routine for INTx and MSI.
   10344  */
   10345 static int
    10346 		/* XXX needed for ALTQ or a single-CPU system */
   10347 {
   10348 	struct wm_softc *sc = arg;
   10349 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10350 	struct wm_queue *wmq = &sc->sc_queue[0];
    10351 		/* XXX needed for ALTQ or a single-CPU system */
   10352 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10353 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10354 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10355 	uint32_t icr, rndval = 0;
   10356 	bool more = false;
   10357 
   10358 	icr = CSR_READ(sc, WMREG_ICR);
   10359 	if ((icr & sc->sc_icr) == 0)
   10360 		return 0;
   10361 
   10362 	DPRINTF(sc, WM_DEBUG_TX,
    10363 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
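          	/* Use the ICR value as an entropy sample for rnd_source. */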
   10364 	if (rndval == 0)
   10365 		rndval = icr;
   10366 
   10367 	mutex_enter(txq->txq_lock);
   10368 
   10369 	if (txq->txq_stopping) {
   10370 		mutex_exit(txq->txq_lock);
   10371 		return 1;
   10372 	}
   10373 
   10374 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10375 	if (icr & ICR_TXDW) {
   10376 		DPRINTF(sc, WM_DEBUG_TX,
   10377 		    ("%s: TX: got TXDW interrupt\n",
   10378 			device_xname(sc->sc_dev)));
   10379 		WM_Q_EVCNT_INCR(txq, txdw);
   10380 	}
   10381 #endif
   10382 	if (txlimit > 0) {
   10383 		more |= wm_txeof(txq, txlimit);
   10384 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10385 			more = true;
   10386 	} else
   10387 		more = true;
   10388 	mutex_exit(txq->txq_lock);
   10389 
   10390 	mutex_enter(rxq->rxq_lock);
   10391 
   10392 	if (rxq->rxq_stopping) {
   10393 		mutex_exit(rxq->rxq_lock);
   10394 		return 1;
   10395 	}
   10396 
   10397 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10398 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10399 		DPRINTF(sc, WM_DEBUG_RX,
   10400 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10401 			device_xname(sc->sc_dev),
   10402 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10403 		WM_Q_EVCNT_INCR(rxq, intr);
   10404 	}
   10405 #endif
   10406 	if (rxlimit > 0) {
   10407 		/*
   10408 		 * wm_rxeof() does *not* call upper layer functions directly,
    10409 		 * as if_percpuq_enqueue() just calls softint_schedule().
    10410 		 * So we can call wm_rxeof() in interrupt context.
   10411 		 */
    10412 		more |= wm_rxeof(rxq, rxlimit);
   10413 	} else
   10414 		more = true;
   10415 
   10416 	mutex_exit(rxq->rxq_lock);
   10417 
   10418 	mutex_enter(sc->sc_core_lock);
   10419 
   10420 	if (sc->sc_core_stopping) {
   10421 		mutex_exit(sc->sc_core_lock);
   10422 		return 1;
   10423 	}
   10424 
   10425 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10426 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10427 		wm_linkintr(sc, icr);
   10428 	}
   10429 	if ((icr & ICR_GPI(0)) != 0)
   10430 		device_printf(sc->sc_dev, "got module interrupt\n");
   10431 
   10432 	mutex_exit(sc->sc_core_lock);
   10433 
   10434 	if (icr & ICR_RXO) {
   10435 #if defined(WM_DEBUG)
   10436 		log(LOG_WARNING, "%s: Receive overrun\n",
   10437 		    device_xname(sc->sc_dev));
   10438 #endif /* defined(WM_DEBUG) */
   10439 	}
   10440 
   10441 	rnd_add_uint32(&sc->rnd_source, rndval);
   10442 
   10443 	if (more) {
   10444 		/* Try to get more packets going. */
   10445 		wm_legacy_intr_disable(sc);
   10446 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10447 		wm_sched_handle_queue(sc, wmq);
   10448 	}
   10449 
   10450 	return 1;
   10451 }
   10452 
   10453 static inline void
   10454 wm_txrxintr_disable(struct wm_queue *wmq)
   10455 {
   10456 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10457 
   10458 	if (__predict_false(!wm_is_using_msix(sc))) {
   10459 		wm_legacy_intr_disable(sc);
   10460 		return;
   10461 	}
   10462 
   10463 	if (sc->sc_type == WM_T_82574)
   10464 		CSR_WRITE(sc, WMREG_IMC,
   10465 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10466 	else if (sc->sc_type == WM_T_82575)
   10467 		CSR_WRITE(sc, WMREG_EIMC,
   10468 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10469 	else
   10470 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10471 }
   10472 
   10473 static inline void
   10474 wm_txrxintr_enable(struct wm_queue *wmq)
   10475 {
   10476 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10477 
   10478 	wm_itrs_calculate(sc, wmq);
   10479 
   10480 	if (__predict_false(!wm_is_using_msix(sc))) {
   10481 		wm_legacy_intr_enable(sc);
   10482 		return;
   10483 	}
   10484 
   10485 	/*
    10486 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10487 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    10488 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    10489 	 * disabled while its wm_handle_queue(wmq) is running.
   10490 	 */
   10491 	if (sc->sc_type == WM_T_82574)
   10492 		CSR_WRITE(sc, WMREG_IMS,
   10493 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10494 	else if (sc->sc_type == WM_T_82575)
   10495 		CSR_WRITE(sc, WMREG_EIMS,
   10496 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10497 	else
   10498 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10499 }
   10500 
   10501 static int
   10502 wm_txrxintr_msix(void *arg)
   10503 {
   10504 	struct wm_queue *wmq = arg;
   10505 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10506 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10507 	struct wm_softc *sc = txq->txq_sc;
   10508 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10509 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10510 	bool txmore;
   10511 	bool rxmore;
   10512 
   10513 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10514 
   10515 	DPRINTF(sc, WM_DEBUG_TX,
   10516 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10517 
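          	/*
          	 * Mask this queue's interrupt while we process it; it is
          	 * re-enabled below only when no more work is pending,
          	 * otherwise the work is rescheduled and it stays masked.
          	 */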
   10518 	wm_txrxintr_disable(wmq);
   10519 
   10520 	mutex_enter(txq->txq_lock);
   10521 
   10522 	if (txq->txq_stopping) {
   10523 		mutex_exit(txq->txq_lock);
   10524 		return 1;
   10525 	}
   10526 
   10527 	WM_Q_EVCNT_INCR(txq, txdw);
   10528 	if (txlimit > 0) {
   10529 		txmore = wm_txeof(txq, txlimit);
   10530 		/* wm_deferred start() is done in wm_handle_queue(). */
   10531 	} else
   10532 		txmore = true;
   10533 	mutex_exit(txq->txq_lock);
   10534 
   10535 	DPRINTF(sc, WM_DEBUG_RX,
   10536 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10537 	mutex_enter(rxq->rxq_lock);
   10538 
   10539 	if (rxq->rxq_stopping) {
   10540 		mutex_exit(rxq->rxq_lock);
   10541 		return 1;
   10542 	}
   10543 
   10544 	WM_Q_EVCNT_INCR(rxq, intr);
   10545 	if (rxlimit > 0) {
   10546 		rxmore = wm_rxeof(rxq, rxlimit);
   10547 	} else
   10548 		rxmore = true;
   10549 	mutex_exit(rxq->rxq_lock);
   10550 
   10551 	wm_itrs_writereg(sc, wmq);
   10552 
   10553 	if (txmore || rxmore) {
   10554 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10555 		wm_sched_handle_queue(sc, wmq);
   10556 	} else
   10557 		wm_txrxintr_enable(wmq);
   10558 
   10559 	return 1;
   10560 }
   10561 
   10562 static void
   10563 wm_handle_queue(void *arg)
   10564 {
   10565 	struct wm_queue *wmq = arg;
   10566 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10567 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10568 	struct wm_softc *sc = txq->txq_sc;
   10569 	u_int txlimit = sc->sc_tx_process_limit;
   10570 	u_int rxlimit = sc->sc_rx_process_limit;
   10571 	bool txmore;
   10572 	bool rxmore;
   10573 
   10574 	mutex_enter(txq->txq_lock);
   10575 	if (txq->txq_stopping) {
   10576 		mutex_exit(txq->txq_lock);
   10577 		return;
   10578 	}
   10579 	txmore = wm_txeof(txq, txlimit);
   10580 	wm_deferred_start_locked(txq);
   10581 	mutex_exit(txq->txq_lock);
   10582 
   10583 	mutex_enter(rxq->rxq_lock);
   10584 	if (rxq->rxq_stopping) {
   10585 		mutex_exit(rxq->rxq_lock);
   10586 		return;
   10587 	}
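          	/* Rx work here was deferred out of the interrupt handler. */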
   10588 	WM_Q_EVCNT_INCR(rxq, defer);
   10589 	rxmore = wm_rxeof(rxq, rxlimit);
   10590 	mutex_exit(rxq->rxq_lock);
   10591 
   10592 	if (txmore || rxmore) {
   10593 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10594 		wm_sched_handle_queue(sc, wmq);
   10595 	} else
   10596 		wm_txrxintr_enable(wmq);
   10597 }
   10598 
   10599 static void
   10600 wm_handle_queue_work(struct work *wk, void *context)
   10601 {
   10602 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10603 
   10604 	/*
    10605 	 * Workaround for some qemu environments: they don't stop
    10606 	 * delivering interrupts immediately.
   10607 	 */
   10608 	wmq->wmq_wq_enqueued = false;
   10609 	wm_handle_queue(wmq);
   10610 }
   10611 
   10612 /*
   10613  * wm_linkintr_msix:
   10614  *
   10615  *	Interrupt service routine for link status change for MSI-X.
   10616  */
   10617 static int
   10618 wm_linkintr_msix(void *arg)
   10619 {
   10620 	struct wm_softc *sc = arg;
   10621 	uint32_t reg;
    10622 	bool has_rxo = false;
   10623 
   10624 	reg = CSR_READ(sc, WMREG_ICR);
   10625 	mutex_enter(sc->sc_core_lock);
   10626 	DPRINTF(sc, WM_DEBUG_LINK,
   10627 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10628 		device_xname(sc->sc_dev), reg));
   10629 
   10630 	if (sc->sc_core_stopping)
   10631 		goto out;
   10632 
   10633 	if ((reg & ICR_LSC) != 0) {
   10634 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10635 		wm_linkintr(sc, ICR_LSC);
   10636 	}
   10637 	if ((reg & ICR_GPI(0)) != 0)
   10638 		device_printf(sc->sc_dev, "got module interrupt\n");
   10639 
   10640 	/*
   10641 	 * XXX 82574 MSI-X mode workaround
   10642 	 *
    10643 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    10644 	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
    10645 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10646 	 * interrupts by writing WMREG_ICS to process received packets.
   10647 	 */
   10648 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10649 #if defined(WM_DEBUG)
   10650 		log(LOG_WARNING, "%s: Receive overrun\n",
   10651 		    device_xname(sc->sc_dev));
   10652 #endif /* defined(WM_DEBUG) */
   10653 
   10654 		has_rxo = true;
   10655 		/*
    10656 		 * The RXO interrupt fires at a very high rate when the
    10657 		 * receive traffic rate is high. We use polling mode for
    10658 		 * ICR_OTHER, as for the Tx/Rx interrupts. ICR_OTHER is
    10659 		 * re-enabled at the end of wm_txrxintr_msix(), which is
    10660 		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10661 		 */
   10662 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10663 
   10664 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10665 	}
   10666 
    10667 
   10669 out:
   10670 	mutex_exit(sc->sc_core_lock);
   10671 
   10672 	if (sc->sc_type == WM_T_82574) {
   10673 		if (!has_rxo)
   10674 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10675 		else
   10676 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10677 	} else if (sc->sc_type == WM_T_82575)
   10678 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10679 	else
   10680 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10681 
   10682 	return 1;
   10683 }
   10684 
   10685 /*
   10686  * Media related.
   10687  * GMII, SGMII, TBI (and SERDES)
   10688  */
   10689 
   10690 /* Common */
   10691 
   10692 /*
   10693  * wm_tbi_serdes_set_linkled:
   10694  *
   10695  *	Update the link LED on TBI and SERDES devices.
   10696  */
   10697 static void
   10698 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10699 {
   10700 
   10701 	if (sc->sc_tbi_linkup)
   10702 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10703 	else
   10704 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10705 
   10706 	/* 82540 or newer devices are active low */
   10707 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10708 
   10709 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10710 }
   10711 
   10712 /* GMII related */
   10713 
   10714 /*
   10715  * wm_gmii_reset:
   10716  *
   10717  *	Reset the PHY.
   10718  */
   10719 static void
   10720 wm_gmii_reset(struct wm_softc *sc)
   10721 {
   10722 	uint32_t reg;
   10723 	int rv;
   10724 
   10725 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10726 		device_xname(sc->sc_dev), __func__));
   10727 
   10728 	rv = sc->phy.acquire(sc);
   10729 	if (rv != 0) {
   10730 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10731 		    __func__);
   10732 		return;
   10733 	}
   10734 
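          	/*
          	 * Assert and then deassert PHY reset, with chip-specific
          	 * method and timing, while holding the PHY semaphore.
          	 */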
   10735 	switch (sc->sc_type) {
   10736 	case WM_T_82542_2_0:
   10737 	case WM_T_82542_2_1:
   10738 		/* null */
   10739 		break;
   10740 	case WM_T_82543:
   10741 		/*
   10742 		 * With 82543, we need to force speed and duplex on the MAC
   10743 		 * equal to what the PHY speed and duplex configuration is.
   10744 		 * In addition, we need to perform a hardware reset on the PHY
   10745 		 * to take it out of reset.
   10746 		 */
   10747 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10748 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10749 
   10750 		/* The PHY reset pin is active-low. */
   10751 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10752 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10753 		    CTRL_EXT_SWDPIN(4));
   10754 		reg |= CTRL_EXT_SWDPIO(4);
   10755 
   10756 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10757 		CSR_WRITE_FLUSH(sc);
   10758 		delay(10*1000);
   10759 
   10760 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10761 		CSR_WRITE_FLUSH(sc);
   10762 		delay(150);
   10763 #if 0
   10764 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10765 #endif
   10766 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10767 		break;
   10768 	case WM_T_82544:	/* Reset 10000us */
   10769 	case WM_T_82540:
   10770 	case WM_T_82545:
   10771 	case WM_T_82545_3:
   10772 	case WM_T_82546:
   10773 	case WM_T_82546_3:
   10774 	case WM_T_82541:
   10775 	case WM_T_82541_2:
   10776 	case WM_T_82547:
   10777 	case WM_T_82547_2:
   10778 	case WM_T_82571:	/* Reset 100us */
   10779 	case WM_T_82572:
   10780 	case WM_T_82573:
   10781 	case WM_T_82574:
   10782 	case WM_T_82575:
   10783 	case WM_T_82576:
   10784 	case WM_T_82580:
   10785 	case WM_T_I350:
   10786 	case WM_T_I354:
   10787 	case WM_T_I210:
   10788 	case WM_T_I211:
   10789 	case WM_T_82583:
   10790 	case WM_T_80003:
   10791 		/* Generic reset */
   10792 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10793 		CSR_WRITE_FLUSH(sc);
   10794 		delay(20000);
   10795 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10796 		CSR_WRITE_FLUSH(sc);
   10797 		delay(20000);
   10798 
   10799 		if ((sc->sc_type == WM_T_82541)
   10800 		    || (sc->sc_type == WM_T_82541_2)
   10801 		    || (sc->sc_type == WM_T_82547)
   10802 		    || (sc->sc_type == WM_T_82547_2)) {
    10803 			/* Workarounds for igp are done in igp_reset() */
   10804 			/* XXX add code to set LED after phy reset */
   10805 		}
   10806 		break;
   10807 	case WM_T_ICH8:
   10808 	case WM_T_ICH9:
   10809 	case WM_T_ICH10:
   10810 	case WM_T_PCH:
   10811 	case WM_T_PCH2:
   10812 	case WM_T_PCH_LPT:
   10813 	case WM_T_PCH_SPT:
   10814 	case WM_T_PCH_CNP:
   10815 		/* Generic reset */
   10816 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10817 		CSR_WRITE_FLUSH(sc);
   10818 		delay(100);
   10819 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10820 		CSR_WRITE_FLUSH(sc);
   10821 		delay(150);
   10822 		break;
   10823 	default:
   10824 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10825 		    __func__);
   10826 		break;
   10827 	}
   10828 
   10829 	sc->phy.release(sc);
   10830 
   10831 	/* get_cfg_done */
   10832 	wm_get_cfg_done(sc);
   10833 
   10834 	/* Extra setup */
   10835 	switch (sc->sc_type) {
   10836 	case WM_T_82542_2_0:
   10837 	case WM_T_82542_2_1:
   10838 	case WM_T_82543:
   10839 	case WM_T_82544:
   10840 	case WM_T_82540:
   10841 	case WM_T_82545:
   10842 	case WM_T_82545_3:
   10843 	case WM_T_82546:
   10844 	case WM_T_82546_3:
   10845 	case WM_T_82541_2:
   10846 	case WM_T_82547_2:
   10847 	case WM_T_82571:
   10848 	case WM_T_82572:
   10849 	case WM_T_82573:
   10850 	case WM_T_82574:
   10851 	case WM_T_82583:
   10852 	case WM_T_82575:
   10853 	case WM_T_82576:
   10854 	case WM_T_82580:
   10855 	case WM_T_I350:
   10856 	case WM_T_I354:
   10857 	case WM_T_I210:
   10858 	case WM_T_I211:
   10859 	case WM_T_80003:
   10860 		/* Null */
   10861 		break;
   10862 	case WM_T_82541:
   10863 	case WM_T_82547:
    10864 		/* XXX Actively configure the LED after PHY reset */
   10865 		break;
   10866 	case WM_T_ICH8:
   10867 	case WM_T_ICH9:
   10868 	case WM_T_ICH10:
   10869 	case WM_T_PCH:
   10870 	case WM_T_PCH2:
   10871 	case WM_T_PCH_LPT:
   10872 	case WM_T_PCH_SPT:
   10873 	case WM_T_PCH_CNP:
   10874 		wm_phy_post_reset(sc);
   10875 		break;
   10876 	default:
   10877 		panic("%s: unknown type\n", __func__);
   10878 		break;
   10879 	}
   10880 }
   10881 
   10882 /*
   10883  * Set up sc_phytype and mii_{read|write}reg.
   10884  *
    10885  *  To identify the PHY type, the correct read/write functions must
    10886  * be selected. To select the correct read/write functions, the PCI
    10887  * ID or the MAC type is needed, without accessing PHY registers.
    10888  *
    10889  *  On the first call of this function, the PHY ID is not known yet.
    10890  * Check the PCI ID or the MAC type. The list of PCI IDs may not be
    10891  * perfect, so the result might be incorrect.
    10892  *
    10893  *  On the second call, the PHY OUI and model are used to identify
    10894  * the PHY type. It might not be perfect because some entries are
    10895  * missing, but it should be better than the first call.
    10896  *
    10897  *  If the newly detected result differs from the previous
    10898  * assumption, a diagnostic message is printed.
   10899  */
   10900 static void
   10901 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10902     uint16_t phy_model)
   10903 {
   10904 	device_t dev = sc->sc_dev;
   10905 	struct mii_data *mii = &sc->sc_mii;
   10906 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10907 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10908 	mii_readreg_t new_readreg;
   10909 	mii_writereg_t new_writereg;
   10910 	bool dodiag = true;
   10911 
   10912 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10913 		device_xname(sc->sc_dev), __func__));
   10914 
    10915 	/*
    10916 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
    10917 	 * incorrect, so don't print diagnostic output on the second call.
    10918 	 */
   10919 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10920 		dodiag = false;
   10921 
   10922 	if (mii->mii_readreg == NULL) {
   10923 		/*
   10924 		 *  This is the first call of this function. For ICH and PCH
   10925 		 * variants, it's difficult to determine the PHY access method
   10926 		 * by sc_type, so use the PCI product ID for some devices.
   10927 		 */
   10928 
   10929 		switch (sc->sc_pcidevid) {
   10930 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10931 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10932 			/* 82577 */
   10933 			new_phytype = WMPHY_82577;
   10934 			break;
   10935 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10936 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10937 			/* 82578 */
   10938 			new_phytype = WMPHY_82578;
   10939 			break;
   10940 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10941 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10942 			/* 82579 */
   10943 			new_phytype = WMPHY_82579;
   10944 			break;
   10945 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10946 		case PCI_PRODUCT_INTEL_82801I_BM:
   10947 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10948 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10949 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10950 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10951 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10952 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10953 			/* ICH8, 9, 10 with 82567 */
   10954 			new_phytype = WMPHY_BM;
   10955 			break;
   10956 		default:
   10957 			break;
   10958 		}
   10959 	} else {
   10960 		/* It's not the first call. Use PHY OUI and model */
   10961 		switch (phy_oui) {
   10962 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10963 			switch (phy_model) {
   10964 			case MII_MODEL_ATTANSIC_AR8021:
   10965 				new_phytype = WMPHY_82578;
   10966 				break;
   10967 			default:
   10968 				break;
   10969 			}
   10970 			break;
   10971 		case MII_OUI_xxMARVELL:
   10972 			switch (phy_model) {
   10973 			case MII_MODEL_xxMARVELL_I210:
   10974 				new_phytype = WMPHY_I210;
   10975 				break;
   10976 			case MII_MODEL_xxMARVELL_E1011:
   10977 			case MII_MODEL_xxMARVELL_E1000_3:
   10978 			case MII_MODEL_xxMARVELL_E1000_5:
   10979 			case MII_MODEL_xxMARVELL_E1112:
   10980 				new_phytype = WMPHY_M88;
   10981 				break;
   10982 			case MII_MODEL_xxMARVELL_E1149:
   10983 				new_phytype = WMPHY_BM;
   10984 				break;
   10985 			case MII_MODEL_xxMARVELL_E1111:
   10986 			case MII_MODEL_xxMARVELL_I347:
   10987 			case MII_MODEL_xxMARVELL_E1512:
   10988 			case MII_MODEL_xxMARVELL_E1340M:
   10989 			case MII_MODEL_xxMARVELL_E1543:
   10990 				new_phytype = WMPHY_M88;
   10991 				break;
   10992 			case MII_MODEL_xxMARVELL_I82563:
   10993 				new_phytype = WMPHY_GG82563;
   10994 				break;
   10995 			default:
   10996 				break;
   10997 			}
   10998 			break;
   10999 		case MII_OUI_INTEL:
   11000 			switch (phy_model) {
   11001 			case MII_MODEL_INTEL_I82577:
   11002 				new_phytype = WMPHY_82577;
   11003 				break;
   11004 			case MII_MODEL_INTEL_I82579:
   11005 				new_phytype = WMPHY_82579;
   11006 				break;
   11007 			case MII_MODEL_INTEL_I217:
   11008 				new_phytype = WMPHY_I217;
   11009 				break;
   11010 			case MII_MODEL_INTEL_I82580:
   11011 				new_phytype = WMPHY_82580;
   11012 				break;
   11013 			case MII_MODEL_INTEL_I350:
   11014 				new_phytype = WMPHY_I350;
   11015 				break;
   11016 			default:
   11017 				break;
   11018 			}
   11019 			break;
   11020 		case MII_OUI_yyINTEL:
   11021 			switch (phy_model) {
   11022 			case MII_MODEL_yyINTEL_I82562G:
   11023 			case MII_MODEL_yyINTEL_I82562EM:
   11024 			case MII_MODEL_yyINTEL_I82562ET:
   11025 				new_phytype = WMPHY_IFE;
   11026 				break;
   11027 			case MII_MODEL_yyINTEL_IGP01E1000:
   11028 				new_phytype = WMPHY_IGP;
   11029 				break;
   11030 			case MII_MODEL_yyINTEL_I82566:
   11031 				new_phytype = WMPHY_IGP_3;
   11032 				break;
   11033 			default:
   11034 				break;
   11035 			}
   11036 			break;
   11037 		default:
   11038 			break;
   11039 		}
   11040 
   11041 		if (dodiag) {
   11042 			if (new_phytype == WMPHY_UNKNOWN)
   11043 				aprint_verbose_dev(dev,
   11044 				    "%s: Unknown PHY model. OUI=%06x, "
   11045 				    "model=%04x\n", __func__, phy_oui,
   11046 				    phy_model);
   11047 
   11048 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11049 			    && (sc->sc_phytype != new_phytype)) {
    11050 				aprint_error_dev(dev, "Previously assumed PHY "
    11051 				    "type(%u) was incorrect. PHY type from PHY "
    11052 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   11053 			}
   11054 		}
   11055 	}
   11056 
   11057 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11058 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11059 		/* SGMII */
   11060 		new_readreg = wm_sgmii_readreg;
   11061 		new_writereg = wm_sgmii_writereg;
   11062 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11063 		/* BM2 (phyaddr == 1) */
   11064 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11065 		    && (new_phytype != WMPHY_BM)
   11066 		    && (new_phytype != WMPHY_UNKNOWN))
   11067 			doubt_phytype = new_phytype;
   11068 		new_phytype = WMPHY_BM;
   11069 		new_readreg = wm_gmii_bm_readreg;
   11070 		new_writereg = wm_gmii_bm_writereg;
   11071 	} else if (sc->sc_type >= WM_T_PCH) {
   11072 		/* All PCH* use _hv_ */
   11073 		new_readreg = wm_gmii_hv_readreg;
   11074 		new_writereg = wm_gmii_hv_writereg;
   11075 	} else if (sc->sc_type >= WM_T_ICH8) {
   11076 		/* non-82567 ICH8, 9 and 10 */
   11077 		new_readreg = wm_gmii_i82544_readreg;
   11078 		new_writereg = wm_gmii_i82544_writereg;
   11079 	} else if (sc->sc_type >= WM_T_80003) {
   11080 		/* 80003 */
   11081 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11082 		    && (new_phytype != WMPHY_GG82563)
   11083 		    && (new_phytype != WMPHY_UNKNOWN))
   11084 			doubt_phytype = new_phytype;
   11085 		new_phytype = WMPHY_GG82563;
   11086 		new_readreg = wm_gmii_i80003_readreg;
   11087 		new_writereg = wm_gmii_i80003_writereg;
   11088 	} else if (sc->sc_type >= WM_T_I210) {
   11089 		/* I210 and I211 */
   11090 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11091 		    && (new_phytype != WMPHY_I210)
   11092 		    && (new_phytype != WMPHY_UNKNOWN))
   11093 			doubt_phytype = new_phytype;
   11094 		new_phytype = WMPHY_I210;
   11095 		new_readreg = wm_gmii_gs40g_readreg;
   11096 		new_writereg = wm_gmii_gs40g_writereg;
   11097 	} else if (sc->sc_type >= WM_T_82580) {
   11098 		/* 82580, I350 and I354 */
   11099 		new_readreg = wm_gmii_82580_readreg;
   11100 		new_writereg = wm_gmii_82580_writereg;
   11101 	} else if (sc->sc_type >= WM_T_82544) {
    11102 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11103 		new_readreg = wm_gmii_i82544_readreg;
   11104 		new_writereg = wm_gmii_i82544_writereg;
   11105 	} else {
   11106 		new_readreg = wm_gmii_i82543_readreg;
   11107 		new_writereg = wm_gmii_i82543_writereg;
   11108 	}
   11109 
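          	/*
          	 * The BM and PCH overrides below are applied last so that they
          	 * take precedence over the sc_type-based selection above.
          	 */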
   11110 	if (new_phytype == WMPHY_BM) {
   11111 		/* All BM use _bm_ */
   11112 		new_readreg = wm_gmii_bm_readreg;
   11113 		new_writereg = wm_gmii_bm_writereg;
   11114 	}
   11115 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11116 		/* All PCH* use _hv_ */
   11117 		new_readreg = wm_gmii_hv_readreg;
   11118 		new_writereg = wm_gmii_hv_writereg;
   11119 	}
   11120 
   11121 	/* Diag output */
   11122 	if (dodiag) {
   11123 		if (doubt_phytype != WMPHY_UNKNOWN)
   11124 			aprint_error_dev(dev, "Assumed new PHY type was "
   11125 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11126 			    new_phytype);
   11127 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11128 		    && (sc->sc_phytype != new_phytype))
    11129 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    11130 			    "was incorrect. New PHY type = %u\n",
   11131 			    sc->sc_phytype, new_phytype);
   11132 
   11133 		if ((mii->mii_readreg != NULL) &&
   11134 		    (new_phytype == WMPHY_UNKNOWN))
   11135 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11136 
   11137 		if ((mii->mii_readreg != NULL) &&
   11138 		    (mii->mii_readreg != new_readreg))
   11139 			aprint_error_dev(dev, "Previously assumed PHY "
   11140 			    "read/write function was incorrect.\n");
   11141 	}
   11142 
   11143 	/* Update now */
   11144 	sc->sc_phytype = new_phytype;
   11145 	mii->mii_readreg = new_readreg;
   11146 	mii->mii_writereg = new_writereg;
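          	/*
          	 * Also install the *_locked variants for access methods that may
          	 * be used while the PHY semaphore is already held.
          	 */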
   11147 	if (new_readreg == wm_gmii_hv_readreg) {
   11148 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11149 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11150 	} else if (new_readreg == wm_sgmii_readreg) {
   11151 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11152 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11153 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11154 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11155 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11156 	}
   11157 }
   11158 
   11159 /*
   11160  * wm_get_phy_id_82575:
   11161  *
   11162  * Return PHY ID. Return -1 if it failed.
   11163  */
   11164 static int
   11165 wm_get_phy_id_82575(struct wm_softc *sc)
   11166 {
   11167 	uint32_t reg;
   11168 	int phyid = -1;
   11169 
   11170 	/* XXX */
   11171 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11172 		return -1;
   11173 
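          	/*
          	 * When SGMII uses MDIO, the PHY address set up by the EEPROM or
          	 * firmware can be read back from the MDIC register (82575/82576)
          	 * or the MDICNFG register (82580 and later).
          	 */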
   11174 	if (wm_sgmii_uses_mdio(sc)) {
   11175 		switch (sc->sc_type) {
   11176 		case WM_T_82575:
   11177 		case WM_T_82576:
   11178 			reg = CSR_READ(sc, WMREG_MDIC);
   11179 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11180 			break;
   11181 		case WM_T_82580:
   11182 		case WM_T_I350:
   11183 		case WM_T_I354:
   11184 		case WM_T_I210:
   11185 		case WM_T_I211:
   11186 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11187 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11188 			break;
   11189 		default:
   11190 			return -1;
   11191 		}
   11192 	}
   11193 
   11194 	return phyid;
   11195 }
   11196 
   11197 /*
   11198  * wm_gmii_mediainit:
   11199  *
   11200  *	Initialize media for use on 1000BASE-T devices.
   11201  */
   11202 static void
   11203 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11204 {
   11205 	device_t dev = sc->sc_dev;
   11206 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11207 	struct mii_data *mii = &sc->sc_mii;
   11208 
   11209 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11210 		device_xname(sc->sc_dev), __func__));
   11211 
   11212 	/* We have GMII. */
   11213 	sc->sc_flags |= WM_F_HAS_MII;
   11214 
   11215 	if (sc->sc_type == WM_T_80003)
   11216 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11217 	else
   11218 		sc->sc_tipg = TIPG_1000T_DFLT;
   11219 
   11220 	/*
   11221 	 * Let the chip set speed/duplex on its own based on
   11222 	 * signals from the PHY.
   11223 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11224 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11225 	 */
   11226 	sc->sc_ctrl |= CTRL_SLU;
   11227 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11228 
   11229 	/* Initialize our media structures and probe the GMII. */
   11230 	mii->mii_ifp = ifp;
   11231 
   11232 	mii->mii_statchg = wm_gmii_statchg;
   11233 
    11234 	/* Get PHY control from SMBus to PCIe */
   11235 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11236 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11237 	    || (sc->sc_type == WM_T_PCH_CNP))
   11238 		wm_init_phy_workarounds_pchlan(sc);
   11239 
   11240 	wm_gmii_reset(sc);
   11241 
   11242 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11243 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11244 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11245 
   11246 	/* Setup internal SGMII PHY for SFP */
   11247 	wm_sgmii_sfp_preconfig(sc);
   11248 
   11249 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11250 	    || (sc->sc_type == WM_T_82580)
   11251 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11252 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11253 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11254 			/* Attach only one port */
   11255 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11256 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11257 		} else {
   11258 			int i, id;
   11259 			uint32_t ctrl_ext;
   11260 
   11261 			id = wm_get_phy_id_82575(sc);
   11262 			if (id != -1) {
   11263 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11264 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11265 			}
   11266 			if ((id == -1)
   11267 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11268 				/* Power on sgmii phy if it is disabled */
   11269 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11270 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11271 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11272 				CSR_WRITE_FLUSH(sc);
   11273 				delay(300*1000); /* XXX too long */
   11274 
   11275 				/*
   11276 				 * From 1 to 8.
   11277 				 *
   11278 				 * I2C access fails with I2C register's ERROR
   11279 				 * bit set, so prevent error message while
   11280 				 * scanning.
   11281 				 */
   11282 				sc->phy.no_errprint = true;
   11283 				for (i = 1; i < 8; i++)
   11284 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11285 					    0xffffffff, i, MII_OFFSET_ANY,
   11286 					    MIIF_DOPAUSE);
   11287 				sc->phy.no_errprint = false;
   11288 
   11289 				/* Restore previous sfp cage power state */
   11290 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11291 			}
   11292 		}
   11293 	} else
   11294 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11295 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11296 
   11297 	/*
   11298 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11299 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11300 	 */
   11301 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11302 		|| (sc->sc_type == WM_T_PCH_SPT)
   11303 		|| (sc->sc_type == WM_T_PCH_CNP))
   11304 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11305 		wm_set_mdio_slow_mode_hv(sc);
   11306 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11307 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11308 	}
   11309 
   11310 	/*
   11311 	 * (For ICH8 variants)
   11312 	 * If PHY detection failed, use BM's r/w function and retry.
   11313 	 */
   11314 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11315 		/* if failed, retry with *_bm_* */
   11316 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11317 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11318 		    sc->sc_phytype);
   11319 		sc->sc_phytype = WMPHY_BM;
   11320 		mii->mii_readreg = wm_gmii_bm_readreg;
   11321 		mii->mii_writereg = wm_gmii_bm_writereg;
   11322 
   11323 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11324 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11325 	}
   11326 
   11327 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11328 		/* No PHY was found */
   11329 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11330 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11331 		sc->sc_phytype = WMPHY_NONE;
   11332 	} else {
   11333 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11334 
   11335 		/*
   11336 		 * PHY found! Check PHY type again by the second call of
   11337 		 * wm_gmii_setup_phytype.
   11338 		 */
   11339 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11340 		    child->mii_mpd_model);
   11341 
   11342 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11343 	}
   11344 }
   11345 
   11346 /*
   11347  * wm_gmii_mediachange:	[ifmedia interface function]
   11348  *
   11349  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11350  */
   11351 static int
   11352 wm_gmii_mediachange(struct ifnet *ifp)
   11353 {
   11354 	struct wm_softc *sc = ifp->if_softc;
   11355 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11356 	uint32_t reg;
   11357 	int rc;
   11358 
   11359 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11360 		device_xname(sc->sc_dev), __func__));
   11361 
   11362 	KASSERT(mutex_owned(sc->sc_core_lock));
   11363 
   11364 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11365 		return 0;
   11366 
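          	/*
          	 * Clear the "GO Link Disconnect" bit; while it is set, these
          	 * devices keep the PHY disconnected and no link can come up.
          	 */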
   11367 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11368 	if ((sc->sc_type == WM_T_82580)
   11369 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11370 	    || (sc->sc_type == WM_T_I211)) {
   11371 		reg = CSR_READ(sc, WMREG_PHPM);
   11372 		reg &= ~PHPM_GO_LINK_D;
   11373 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11374 	}
   11375 
   11376 	/* Disable D0 LPLU. */
   11377 	wm_lplu_d0_disable(sc);
   11378 
   11379 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11380 	sc->sc_ctrl |= CTRL_SLU;
   11381 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11382 	    || (sc->sc_type > WM_T_82543)) {
   11383 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11384 	} else {
   11385 		sc->sc_ctrl &= ~CTRL_ASDE;
   11386 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11387 		if (ife->ifm_media & IFM_FDX)
   11388 			sc->sc_ctrl |= CTRL_FD;
   11389 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11390 		case IFM_10_T:
   11391 			sc->sc_ctrl |= CTRL_SPEED_10;
   11392 			break;
   11393 		case IFM_100_TX:
   11394 			sc->sc_ctrl |= CTRL_SPEED_100;
   11395 			break;
   11396 		case IFM_1000_T:
   11397 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11398 			break;
   11399 		case IFM_NONE:
   11400 			/* There is no specific setting for IFM_NONE */
   11401 			break;
   11402 		default:
   11403 			panic("wm_gmii_mediachange: bad media 0x%x",
   11404 			    ife->ifm_media);
   11405 		}
   11406 	}
   11407 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11408 	CSR_WRITE_FLUSH(sc);
   11409 
   11410 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11411 		wm_serdes_mediachange(ifp);
   11412 
   11413 	if (sc->sc_type <= WM_T_82543)
   11414 		wm_gmii_reset(sc);
   11415 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11416 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11417 		/* Allow time for the SFP cage to power up the PHY */
   11418 		delay(300 * 1000);
   11419 		wm_gmii_reset(sc);
   11420 	}
   11421 
   11422 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11423 		return 0;
   11424 	return rc;
   11425 }
   11426 
   11427 /*
   11428  * wm_gmii_mediastatus:	[ifmedia interface function]
   11429  *
   11430  *	Get the current interface media status on a 1000BASE-T device.
   11431  */
   11432 static void
   11433 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11434 {
   11435 	struct wm_softc *sc = ifp->if_softc;
   11436 
   11437 	KASSERT(mutex_owned(sc->sc_core_lock));
   11438 
   11439 	ether_mediastatus(ifp, ifmr);
   11440 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11441 	    | sc->sc_flowflags;
   11442 }
   11443 
   11444 #define	MDI_IO		CTRL_SWDPIN(2)
   11445 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11446 #define	MDI_CLK		CTRL_SWDPIN(3)
   11447 
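          /*
           * On the 82543 the MII management interface is bit-banged through
           * software-controlled pins in the CTRL register: MDI_IO carries the
           * data bit, MDI_CLK is the management clock and MDI_DIR sets the
           * data pin direction (host -> PHY).  Each bit phase is held for
           * ~10us, far below the 2.5MHz maximum MDC rate of IEEE 802.3
           * clause 22; clause 22 imposes no minimum rate.
           */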
   11448 static void
   11449 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11450 {
   11451 	uint32_t i, v;
   11452 
   11453 	v = CSR_READ(sc, WMREG_CTRL);
   11454 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11455 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11456 
   11457 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11458 		if (data & i)
   11459 			v |= MDI_IO;
   11460 		else
   11461 			v &= ~MDI_IO;
   11462 		CSR_WRITE(sc, WMREG_CTRL, v);
   11463 		CSR_WRITE_FLUSH(sc);
   11464 		delay(10);
   11465 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11466 		CSR_WRITE_FLUSH(sc);
   11467 		delay(10);
   11468 		CSR_WRITE(sc, WMREG_CTRL, v);
   11469 		CSR_WRITE_FLUSH(sc);
   11470 		delay(10);
   11471 	}
   11472 }
   11473 
   11474 static uint16_t
   11475 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11476 {
   11477 	uint32_t v, i;
   11478 	uint16_t data = 0;
   11479 
   11480 	v = CSR_READ(sc, WMREG_CTRL);
   11481 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11482 	v |= CTRL_SWDPIO(3);
   11483 
   11484 	CSR_WRITE(sc, WMREG_CTRL, v);
   11485 	CSR_WRITE_FLUSH(sc);
   11486 	delay(10);
   11487 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11488 	CSR_WRITE_FLUSH(sc);
   11489 	delay(10);
   11490 	CSR_WRITE(sc, WMREG_CTRL, v);
   11491 	CSR_WRITE_FLUSH(sc);
   11492 	delay(10);
   11493 
   11494 	for (i = 0; i < 16; i++) {
   11495 		data <<= 1;
   11496 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11497 		CSR_WRITE_FLUSH(sc);
   11498 		delay(10);
   11499 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11500 			data |= 1;
   11501 		CSR_WRITE(sc, WMREG_CTRL, v);
   11502 		CSR_WRITE_FLUSH(sc);
   11503 		delay(10);
   11504 	}
   11505 
   11506 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11507 	CSR_WRITE_FLUSH(sc);
   11508 	delay(10);
   11509 	CSR_WRITE(sc, WMREG_CTRL, v);
   11510 	CSR_WRITE_FLUSH(sc);
   11511 	delay(10);
   11512 
   11513 	return data;
   11514 }
   11515 
   11516 #undef MDI_IO
   11517 #undef MDI_DIR
   11518 #undef MDI_CLK
   11519 
   11520 /*
   11521  * wm_gmii_i82543_readreg:	[mii interface function]
   11522  *
   11523  *	Read a PHY register on the GMII (i82543 version).
   11524  */
   11525 static int
   11526 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11527 {
   11528 	struct wm_softc *sc = device_private(dev);
   11529 
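          	/*
          	 * Clock out a 32-bit preamble of ones, then the 14 leading bits
          	 * of the clause 22 read frame: start (01), read opcode (10),
          	 * 5-bit PHY address and 5-bit register address. The turnaround
          	 * and the 16 data bits are then clocked back in via
          	 * wm_i82543_mii_recvbits().
          	 */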
   11530 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11531 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11532 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11533 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11534 
   11535 	DPRINTF(sc, WM_DEBUG_GMII,
   11536 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11537 		device_xname(dev), phy, reg, *val));
   11538 
   11539 	return 0;
   11540 }
   11541 
   11542 /*
   11543  * wm_gmii_i82543_writereg:	[mii interface function]
   11544  *
   11545  *	Write a PHY register on the GMII (i82543 version).
   11546  */
   11547 static int
   11548 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11549 {
   11550 	struct wm_softc *sc = device_private(dev);
   11551 
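          	/*
          	 * Clock out the preamble followed by the full 32-bit clause 22
          	 * write frame: start (01), write opcode (01), PHY address,
          	 * register address, turnaround (10) and the 16 data bits.
          	 */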
   11552 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11553 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11554 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11555 	    (MII_COMMAND_START << 30), 32);
   11556 
   11557 	return 0;
   11558 }
   11559 
   11560 /*
   11561  * wm_gmii_mdic_readreg:	[mii interface function]
   11562  *
   11563  *	Read a PHY register on the GMII.
   11564  */
   11565 static int
   11566 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11567 {
   11568 	struct wm_softc *sc = device_private(dev);
   11569 	uint32_t mdic = 0;
   11570 	int i;
   11571 
   11572 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11573 	    && (reg > MII_ADDRMASK)) {
   11574 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11575 		    __func__, sc->sc_phytype, reg);
   11576 		reg &= MII_ADDRMASK;
   11577 	}
   11578 
   11579 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11580 	    MDIC_REGADD(reg));
   11581 
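          	/* Poll until the MDI transaction completes (MDIC_READY set). */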
   11582 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11583 		delay(50);
   11584 		mdic = CSR_READ(sc, WMREG_MDIC);
   11585 		if (mdic & MDIC_READY)
   11586 			break;
   11587 	}
   11588 
   11589 	if ((mdic & MDIC_READY) == 0) {
   11590 		DPRINTF(sc, WM_DEBUG_GMII,
   11591 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11592 			device_xname(dev), phy, reg));
   11593 		return ETIMEDOUT;
   11594 	} else if (mdic & MDIC_E) {
   11595 		/* This is normal if no PHY is present. */
   11596 		DPRINTF(sc, WM_DEBUG_GMII,
   11597 		    ("%s: MDIC read error: phy %d reg %d\n",
   11598 			device_xname(sc->sc_dev), phy, reg));
   11599 		return -1;
   11600 	} else
   11601 		*val = MDIC_DATA(mdic);
   11602 
   11603 	/*
   11604 	 * Allow some time after each MDIC transaction to avoid
   11605 	 * reading duplicate data in the next MDIC transaction.
   11606 	 */
   11607 	if (sc->sc_type == WM_T_PCH2)
   11608 		delay(100);
   11609 
   11610 	return 0;
   11611 }
   11612 
   11613 /*
   11614  * wm_gmii_mdic_writereg:	[mii interface function]
   11615  *
   11616  *	Write a PHY register on the GMII.
   11617  */
   11618 static int
   11619 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11620 {
   11621 	struct wm_softc *sc = device_private(dev);
   11622 	uint32_t mdic = 0;
   11623 	int i;
   11624 
   11625 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11626 	    && (reg > MII_ADDRMASK)) {
   11627 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11628 		    __func__, sc->sc_phytype, reg);
   11629 		reg &= MII_ADDRMASK;
   11630 	}
   11631 
   11632 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11633 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11634 
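          	/* Poll until the MDI transaction completes (MDIC_READY set). */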
   11635 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11636 		delay(50);
   11637 		mdic = CSR_READ(sc, WMREG_MDIC);
   11638 		if (mdic & MDIC_READY)
   11639 			break;
   11640 	}
   11641 
   11642 	if ((mdic & MDIC_READY) == 0) {
   11643 		DPRINTF(sc, WM_DEBUG_GMII,
   11644 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11645 			device_xname(dev), phy, reg));
   11646 		return ETIMEDOUT;
   11647 	} else if (mdic & MDIC_E) {
   11648 		DPRINTF(sc, WM_DEBUG_GMII,
   11649 		    ("%s: MDIC write error: phy %d reg %d\n",
   11650 			device_xname(dev), phy, reg));
   11651 		return -1;
   11652 	}
   11653 
   11654 	/*
   11655 	 * Allow some time after each MDIC transaction to avoid
   11656 	 * reading duplicate data in the next MDIC transaction.
   11657 	 */
   11658 	if (sc->sc_type == WM_T_PCH2)
   11659 		delay(100);
   11660 
   11661 	return 0;
   11662 }
   11663 
   11664 /*
   11665  * wm_gmii_i82544_readreg:	[mii interface function]
   11666  *
   11667  *	Read a PHY register on the GMII.
   11668  */
   11669 static int
   11670 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11671 {
   11672 	struct wm_softc *sc = device_private(dev);
   11673 	int rv;
   11674 
   11675 	rv = sc->phy.acquire(sc);
   11676 	if (rv != 0) {
   11677 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11678 		return rv;
   11679 	}
   11680 
   11681 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11682 
   11683 	sc->phy.release(sc);
   11684 
   11685 	return rv;
   11686 }
   11687 
   11688 static int
   11689 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11690 {
   11691 	struct wm_softc *sc = device_private(dev);
   11692 	int rv;
   11693 
   11694 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11695 		switch (sc->sc_phytype) {
   11696 		case WMPHY_IGP:
   11697 		case WMPHY_IGP_2:
   11698 		case WMPHY_IGP_3:
   11699 			rv = wm_gmii_mdic_writereg(dev, phy,
   11700 			    IGPHY_PAGE_SELECT, reg);
   11701 			if (rv != 0)
   11702 				return rv;
   11703 			break;
   11704 		default:
   11705 #ifdef WM_DEBUG
   11706 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11707 			    __func__, sc->sc_phytype, reg);
   11708 #endif
   11709 			break;
   11710 		}
   11711 	}
   11712 
   11713 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11714 }
   11715 
   11716 /*
   11717  * wm_gmii_i82544_writereg:	[mii interface function]
   11718  *
   11719  *	Write a PHY register on the GMII.
   11720  */
   11721 static int
   11722 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11723 {
   11724 	struct wm_softc *sc = device_private(dev);
   11725 	int rv;
   11726 
   11727 	rv = sc->phy.acquire(sc);
   11728 	if (rv != 0) {
   11729 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11730 		return rv;
   11731 	}
   11732 
   11733 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11734 	sc->phy.release(sc);
   11735 
   11736 	return rv;
   11737 }
   11738 
   11739 static int
   11740 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11741 {
   11742 	struct wm_softc *sc = device_private(dev);
   11743 	int rv;
   11744 
   11745 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11746 		switch (sc->sc_phytype) {
   11747 		case WMPHY_IGP:
   11748 		case WMPHY_IGP_2:
   11749 		case WMPHY_IGP_3:
   11750 			rv = wm_gmii_mdic_writereg(dev, phy,
   11751 			    IGPHY_PAGE_SELECT, reg);
   11752 			if (rv != 0)
   11753 				return rv;
   11754 			break;
   11755 		default:
   11756 #ifdef WM_DEBUG
   11757 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11758 			    __func__, sc->sc_phytype, reg);
   11759 #endif
   11760 			break;
   11761 		}
   11762 	}
   11763 
   11764 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11765 }
   11766 
   11767 /*
   11768  * wm_gmii_i80003_readreg:	[mii interface function]
   11769  *
    11770  *	Read a PHY register on the Kumeran bus (80003's GG82563).
   11771  * This could be handled by the PHY layer if we didn't have to lock the
   11772  * resource ...
   11773  */
   11774 static int
   11775 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11776 {
   11777 	struct wm_softc *sc = device_private(dev);
   11778 	int page_select;
   11779 	uint16_t temp, temp2;
   11780 	int rv;
   11781 
   11782 	if (phy != 1) /* Only one PHY on kumeran bus */
   11783 		return -1;
   11784 
   11785 	rv = sc->phy.acquire(sc);
   11786 	if (rv != 0) {
   11787 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11788 		return rv;
   11789 	}
   11790 
   11791 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11792 		page_select = GG82563_PHY_PAGE_SELECT;
   11793 	else {
   11794 		/*
   11795 		 * Use Alternative Page Select register to access registers
   11796 		 * 30 and 31.
   11797 		 */
   11798 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11799 	}
   11800 	temp = reg >> GG82563_PAGE_SHIFT;
   11801 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11802 		goto out;
   11803 
   11804 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11805 		/*
    11806 		 * Wait an extra 200us to work around a bug in the ready
    11807 		 * bit of the MDIC register.
   11808 		 */
   11809 		delay(200);
   11810 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11811 		if ((rv != 0) || (temp2 != temp)) {
   11812 			device_printf(dev, "%s failed\n", __func__);
   11813 			rv = -1;
   11814 			goto out;
   11815 		}
   11816 		delay(200);
   11817 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11818 		delay(200);
   11819 	} else
   11820 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11821 
   11822 out:
   11823 	sc->phy.release(sc);
   11824 	return rv;
   11825 }
   11826 
   11827 /*
   11828  * wm_gmii_i80003_writereg:	[mii interface function]
   11829  *
    11830  *	Write a PHY register on the Kumeran bus (80003's GG82563).
   11831  * This could be handled by the PHY layer if we didn't have to lock the
   11832  * resource ...
   11833  */
   11834 static int
   11835 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11836 {
   11837 	struct wm_softc *sc = device_private(dev);
   11838 	int page_select, rv;
   11839 	uint16_t temp, temp2;
   11840 
   11841 	if (phy != 1) /* Only one PHY on kumeran bus */
   11842 		return -1;
   11843 
   11844 	rv = sc->phy.acquire(sc);
   11845 	if (rv != 0) {
   11846 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11847 		return rv;
   11848 	}
   11849 
   11850 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11851 		page_select = GG82563_PHY_PAGE_SELECT;
   11852 	else {
   11853 		/*
   11854 		 * Use Alternative Page Select register to access registers
   11855 		 * 30 and 31.
   11856 		 */
   11857 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11858 	}
   11859 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11860 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11861 		goto out;
   11862 
   11863 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11864 		/*
    11865 		 * Wait an extra 200us to work around a bug in the ready
    11866 		 * bit of the MDIC register.
   11867 		 */
   11868 		delay(200);
   11869 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11870 		if ((rv != 0) || (temp2 != temp)) {
   11871 			device_printf(dev, "%s failed\n", __func__);
   11872 			rv = -1;
   11873 			goto out;
   11874 		}
   11875 		delay(200);
   11876 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11877 		delay(200);
   11878 	} else
   11879 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11880 
   11881 out:
   11882 	sc->phy.release(sc);
   11883 	return rv;
   11884 }
   11885 
   11886 /*
   11887  * wm_gmii_bm_readreg:	[mii interface function]
   11888  *
    11889  *	Read a PHY register on the BM PHY (82567 and 82574/82583).
   11890  * This could be handled by the PHY layer if we didn't have to lock the
   11891  * resource ...
   11892  */
   11893 static int
   11894 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11895 {
   11896 	struct wm_softc *sc = device_private(dev);
   11897 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11898 	int rv;
   11899 
   11900 	rv = sc->phy.acquire(sc);
   11901 	if (rv != 0) {
   11902 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11903 		return rv;
   11904 	}
   11905 
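          	/*
          	 * On BM PHYs the page select, port control and wakeup registers
          	 * answer at PHY address 1 regardless of the address the PHY was
          	 * detected at.
          	 */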
   11906 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11907 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11908 		    || (reg == 31)) ? 1 : phy;
   11909 	/* Page 800 works differently than the rest so it has its own func */
   11910 	if (page == BM_WUC_PAGE) {
   11911 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11912 		goto release;
   11913 	}
   11914 
   11915 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11916 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11917 		    && (sc->sc_type != WM_T_82583))
   11918 			rv = wm_gmii_mdic_writereg(dev, phy,
   11919 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11920 		else
   11921 			rv = wm_gmii_mdic_writereg(dev, phy,
   11922 			    BME1000_PHY_PAGE_SELECT, page);
   11923 		if (rv != 0)
   11924 			goto release;
   11925 	}
   11926 
   11927 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11928 
   11929 release:
   11930 	sc->phy.release(sc);
   11931 	return rv;
   11932 }
   11933 
   11934 /*
   11935  * wm_gmii_bm_writereg:	[mii interface function]
   11936  *
    11937  *	Write a PHY register on the BM PHY (82567 and 82574/82583).
   11938  * This could be handled by the PHY layer if we didn't have to lock the
   11939  * resource ...
   11940  */
   11941 static int
   11942 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11943 {
   11944 	struct wm_softc *sc = device_private(dev);
   11945 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11946 	int rv;
   11947 
   11948 	rv = sc->phy.acquire(sc);
   11949 	if (rv != 0) {
   11950 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11951 		return rv;
   11952 	}
   11953 
   11954 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11955 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11956 		    || (reg == 31)) ? 1 : phy;
   11957 	/* Page 800 works differently than the rest so it has its own func */
   11958 	if (page == BM_WUC_PAGE) {
   11959 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11960 		goto release;
   11961 	}
   11962 
   11963 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11964 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11965 		    && (sc->sc_type != WM_T_82583))
   11966 			rv = wm_gmii_mdic_writereg(dev, phy,
   11967 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11968 		else
   11969 			rv = wm_gmii_mdic_writereg(dev, phy,
   11970 			    BME1000_PHY_PAGE_SELECT, page);
   11971 		if (rv != 0)
   11972 			goto release;
   11973 	}
   11974 
   11975 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11976 
   11977 release:
   11978 	sc->phy.release(sc);
   11979 	return rv;
   11980 }
   11981 
   11982 /*
   11983  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11984  *  @dev: pointer to the HW structure
   11985  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11986  *
   11987  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11988  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11989  */
   11990 static int
   11991 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11992 {
   11993 #ifdef WM_DEBUG
   11994 	struct wm_softc *sc = device_private(dev);
   11995 #endif
   11996 	uint16_t temp;
   11997 	int rv;
   11998 
   11999 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12000 		device_xname(dev), __func__));
   12001 
   12002 	if (!phy_regp)
   12003 		return -1;
   12004 
   12005 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12006 
   12007 	/* Select Port Control Registers page */
   12008 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12009 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12010 	if (rv != 0)
   12011 		return rv;
   12012 
   12013 	/* Read WUCE and save it */
   12014 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12015 	if (rv != 0)
   12016 		return rv;
   12017 
   12018 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12019 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12020 	 */
   12021 	temp = *phy_regp;
   12022 	temp |= BM_WUC_ENABLE_BIT;
   12023 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12024 
   12025 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12026 		return rv;
   12027 
    12028 	/* Select the Host Wakeup Registers page - the caller is now able
    12029 	 * to write registers on the Wakeup registers page.
   12030 	 */
   12031 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12032 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12033 }
   12034 
   12035 /*
   12036  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12037  *  @dev: pointer to the HW structure
   12038  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12039  *
   12040  *  Restore BM_WUC_ENABLE_REG to its original value.
   12041  *
   12042  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12043  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12044  *  caller.
   12045  */
   12046 static int
   12047 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12048 {
   12049 #ifdef WM_DEBUG
   12050 	struct wm_softc *sc = device_private(dev);
   12051 #endif
   12052 
   12053 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12054 		device_xname(dev), __func__));
   12055 
   12056 	if (!phy_regp)
   12057 		return -1;
   12058 
   12059 	/* Select Port Control Registers page */
   12060 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12061 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12062 
   12063 	/* Restore 769.17 to its original value */
   12064 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12065 
   12066 	return 0;
   12067 }
   12068 
   12069 /*
   12070  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12071  *  @dev: pointer to the HW structure
   12072  *  @offset: register offset to be read or written
   12073  *  @val: pointer to the data to read or write
   12074  *  @rd: determines if operation is read or write
   12075  *  @page_set: BM_WUC_PAGE already set and access enabled
   12076  *
   12077  *  Read the PHY register at offset and store the retrieved information in
   12078  *  data, or write data to PHY register at offset.  Note the procedure to
   12079  *  access the PHY wakeup registers is different than reading the other PHY
   12080  *  registers. It works as such:
   12081  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12082  *  2) Set page to 800 for host (801 if we were the manageability engine)
   12083  *  3) Write the address using the address opcode (0x11)
   12084  *  4) Read or write the data using the data opcode (0x12)
   12085  *  5) Restore 769.17.2 to its original value
   12086  *
   12087  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12088  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12089  *
   12090  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12091  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12092  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12093  */
   12094 static int
    12095 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12096     bool page_set)
   12097 {
   12098 	struct wm_softc *sc = device_private(dev);
   12099 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12100 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12101 	uint16_t wuce;
   12102 	int rv = 0;
   12103 
   12104 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12105 		device_xname(dev), __func__));
   12106 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12107 	if ((sc->sc_type == WM_T_PCH)
   12108 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12109 		device_printf(dev,
   12110 		    "Attempting to access page %d while gig enabled.\n", page);
   12111 	}
   12112 
   12113 	if (!page_set) {
   12114 		/* Enable access to PHY wakeup registers */
   12115 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12116 		if (rv != 0) {
   12117 			device_printf(dev,
   12118 			    "%s: Could not enable PHY wakeup reg access\n",
   12119 			    __func__);
   12120 			return rv;
   12121 		}
   12122 	}
   12123 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12124 		device_xname(sc->sc_dev), __func__, page, regnum));
   12125 
   12126 	/*
    12127 	 * Steps 3) and 4) of the procedure described above: write the
    12128 	 * register number using the address opcode, then access the data.
   12129 	 */
   12130 
   12131 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12132 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12133 	if (rv != 0)
   12134 		return rv;
   12135 
   12136 	if (rd) {
   12137 		/* Read the Wakeup register page value using opcode 0x12 */
   12138 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12139 	} else {
   12140 		/* Write the Wakeup register page value using opcode 0x12 */
   12141 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12142 	}
   12143 	if (rv != 0)
   12144 		return rv;
   12145 
   12146 	if (!page_set)
   12147 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12148 
   12149 	return rv;
   12150 }
   12151 
   12152 /*
   12153  * wm_gmii_hv_readreg:	[mii interface function]
   12154  *
    12155  *	Read a PHY register on the HV PHY (82577 and later PCH-attached PHYs).
   12156  * This could be handled by the PHY layer if we didn't have to lock the
   12157  * resource ...
   12158  */
   12159 static int
   12160 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12161 {
   12162 	struct wm_softc *sc = device_private(dev);
   12163 	int rv;
   12164 
   12165 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12166 		device_xname(dev), __func__));
   12167 
   12168 	rv = sc->phy.acquire(sc);
   12169 	if (rv != 0) {
   12170 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12171 		return rv;
   12172 	}
   12173 
   12174 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12175 	sc->phy.release(sc);
   12176 	return rv;
   12177 }
   12178 
   12179 static int
   12180 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12181 {
   12182 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12183 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12184 	int rv;
   12185 
   12186 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12187 
   12188 	/* Page 800 works differently than the rest so it has its own func */
   12189 	if (page == BM_WUC_PAGE)
   12190 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12191 
   12192 	/*
    12193 	 * Pages lower than 768 work differently than the rest, so they
    12194 	 * would need their own function (unimplemented here; fail loudly)
   12195 	 */
   12196 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12197 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12198 		return -1;
   12199 	}
   12200 
   12201 	/*
   12202 	 * XXX I21[789] documents say that the SMBus Address register is at
   12203 	 * PHY address 01, Page 0 (not 768), Register 26.
   12204 	 */
   12205 	if (page == HV_INTC_FC_PAGE_START)
   12206 		page = 0;
   12207 
   12208 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12209 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12210 		    page << BME1000_PAGE_SHIFT);
   12211 		if (rv != 0)
   12212 			return rv;
   12213 	}
   12214 
   12215 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12216 }
   12217 
   12218 /*
   12219  * wm_gmii_hv_writereg:	[mii interface function]
   12220  *
    12221  *	Write a PHY register on the HV PHY (82577 and later PCH-attached PHYs).
   12222  * This could be handled by the PHY layer if we didn't have to lock the
   12223  * resource ...
   12224  */
   12225 static int
   12226 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12227 {
   12228 	struct wm_softc *sc = device_private(dev);
   12229 	int rv;
   12230 
   12231 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12232 		device_xname(dev), __func__));
   12233 
   12234 	rv = sc->phy.acquire(sc);
   12235 	if (rv != 0) {
   12236 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12237 		return rv;
   12238 	}
   12239 
   12240 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12241 	sc->phy.release(sc);
   12242 
   12243 	return rv;
   12244 }
   12245 
   12246 static int
   12247 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12248 {
   12249 	struct wm_softc *sc = device_private(dev);
   12250 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12251 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12252 	int rv;
   12253 
   12254 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12255 
   12256 	/* Page 800 works differently than the rest so it has its own func */
   12257 	if (page == BM_WUC_PAGE)
   12258 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12259 		    false);
   12260 
   12261 	/*
    12262 	 * Pages lower than 768 work differently than the rest, so they
    12263 	 * would need their own function (unimplemented here; fail loudly)
   12264 	 */
   12265 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12266 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12267 		return -1;
   12268 	}
   12269 
   12270 	{
   12271 		/*
   12272 		 * XXX I21[789] documents say that the SMBus Address register
   12273 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12274 		 */
   12275 		if (page == HV_INTC_FC_PAGE_START)
   12276 			page = 0;
   12277 
   12278 		/*
   12279 		 * XXX Workaround MDIO accesses being disabled after entering
   12280 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12281 		 * register is set)
   12282 		 */
   12283 		if (sc->sc_phytype == WMPHY_82578) {
   12284 			struct mii_softc *child;
   12285 
   12286 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12287 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12288 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12289 			    && ((val & (1 << 11)) != 0)) {
   12290 				device_printf(dev, "XXX need workaround\n");
   12291 			}
   12292 		}
   12293 
   12294 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12295 			rv = wm_gmii_mdic_writereg(dev, 1,
   12296 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12297 			if (rv != 0)
   12298 				return rv;
   12299 		}
   12300 	}
   12301 
   12302 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12303 }
   12304 
   12305 /*
   12306  * wm_gmii_82580_readreg:	[mii interface function]
   12307  *
   12308  *	Read a PHY register on the 82580 and I350.
   12309  * This could be handled by the PHY layer if we didn't have to lock the
   12310  * resource ...
   12311  */
   12312 static int
   12313 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12314 {
   12315 	struct wm_softc *sc = device_private(dev);
   12316 	int rv;
   12317 
   12318 	rv = sc->phy.acquire(sc);
   12319 	if (rv != 0) {
   12320 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12321 		return rv;
   12322 	}
   12323 
   12324 #ifdef DIAGNOSTIC
   12325 	if (reg > MII_ADDRMASK) {
   12326 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12327 		    __func__, sc->sc_phytype, reg);
   12328 		reg &= MII_ADDRMASK;
   12329 	}
   12330 #endif
   12331 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12332 
   12333 	sc->phy.release(sc);
   12334 	return rv;
   12335 }
   12336 
   12337 /*
   12338  * wm_gmii_82580_writereg:	[mii interface function]
   12339  *
   12340  *	Write a PHY register on the 82580 and I350.
   12341  * This could be handled by the PHY layer if we didn't have to lock the
   12342  * resource ...
   12343  */
   12344 static int
   12345 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12346 {
   12347 	struct wm_softc *sc = device_private(dev);
   12348 	int rv;
   12349 
   12350 	rv = sc->phy.acquire(sc);
   12351 	if (rv != 0) {
   12352 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12353 		return rv;
   12354 	}
   12355 
   12356 #ifdef DIAGNOSTIC
   12357 	if (reg > MII_ADDRMASK) {
   12358 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12359 		    __func__, sc->sc_phytype, reg);
   12360 		reg &= MII_ADDRMASK;
   12361 	}
   12362 #endif
   12363 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12364 
   12365 	sc->phy.release(sc);
   12366 	return rv;
   12367 }
   12368 
   12369 /*
   12370  * wm_gmii_gs40g_readreg:	[mii interface function]
   12371  *
    12372  *	Read a PHY register on the I210 and I211.
   12373  * This could be handled by the PHY layer if we didn't have to lock the
   12374  * resource ...
   12375  */
   12376 static int
   12377 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12378 {
   12379 	struct wm_softc *sc = device_private(dev);
   12380 	int page, offset;
   12381 	int rv;
   12382 
   12383 	/* Acquire semaphore */
   12384 	rv = sc->phy.acquire(sc);
   12385 	if (rv != 0) {
   12386 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12387 		return rv;
   12388 	}
   12389 
   12390 	/* Page select */
   12391 	page = reg >> GS40G_PAGE_SHIFT;
   12392 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12393 	if (rv != 0)
   12394 		goto release;
   12395 
   12396 	/* Read reg */
   12397 	offset = reg & GS40G_OFFSET_MASK;
   12398 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12399 
   12400 release:
   12401 	sc->phy.release(sc);
   12402 	return rv;
   12403 }
   12404 
   12405 /*
   12406  * wm_gmii_gs40g_writereg:	[mii interface function]
   12407  *
   12408  *	Write a PHY register on the I210 and I211.
   12409  * This could be handled by the PHY layer if we didn't have to lock the
   12410  * resource ...
   12411  */
   12412 static int
   12413 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12414 {
   12415 	struct wm_softc *sc = device_private(dev);
   12416 	uint16_t page;
   12417 	int offset, rv;
   12418 
   12419 	/* Acquire semaphore */
   12420 	rv = sc->phy.acquire(sc);
   12421 	if (rv != 0) {
   12422 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12423 		return rv;
   12424 	}
   12425 
   12426 	/* Page select */
   12427 	page = reg >> GS40G_PAGE_SHIFT;
   12428 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12429 	if (rv != 0)
   12430 		goto release;
   12431 
   12432 	/* Write reg */
   12433 	offset = reg & GS40G_OFFSET_MASK;
   12434 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12435 
   12436 release:
   12437 	/* Release semaphore */
   12438 	sc->phy.release(sc);
   12439 	return rv;
   12440 }
   12441 
   12442 /*
   12443  * wm_gmii_statchg:	[mii interface function]
   12444  *
   12445  *	Callback from MII layer when media changes.
   12446  */
   12447 static void
   12448 wm_gmii_statchg(struct ifnet *ifp)
   12449 {
   12450 	struct wm_softc *sc = ifp->if_softc;
   12451 	struct mii_data *mii = &sc->sc_mii;
   12452 
   12453 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12454 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12455 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12456 
   12457 	/* Get flow control negotiation result. */
   12458 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12459 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12460 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12461 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12462 	}
   12463 
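          	/*
          	 * Apply the negotiated pause flags to the MAC: TXPAUSE enables
          	 * PAUSE/XON transmission (CTRL_TFCE, FCRTL_XONE) and RXPAUSE
          	 * makes the MAC honour received PAUSE frames (CTRL_RFCE).
          	 */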
   12464 	if (sc->sc_flowflags & IFM_FLOW) {
   12465 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12466 			sc->sc_ctrl |= CTRL_TFCE;
   12467 			sc->sc_fcrtl |= FCRTL_XONE;
   12468 		}
   12469 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12470 			sc->sc_ctrl |= CTRL_RFCE;
   12471 	}
   12472 
   12473 	if (mii->mii_media_active & IFM_FDX) {
   12474 		DPRINTF(sc, WM_DEBUG_LINK,
   12475 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12476 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12477 	} else {
   12478 		DPRINTF(sc, WM_DEBUG_LINK,
   12479 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12480 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12481 	}
   12482 
   12483 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12484 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12485 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12486 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12487 	if (sc->sc_type == WM_T_80003) {
   12488 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12489 		case IFM_1000_T:
   12490 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12491 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12492 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12493 			break;
   12494 		default:
   12495 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12496 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12497 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12498 			break;
   12499 		}
   12500 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12501 	}
   12502 }
   12503 
   12504 /* kumeran related (80003, ICH* and PCH*) */
   12505 
   12506 /*
   12507  * wm_kmrn_readreg:
   12508  *
   12509  *	Read a kumeran register
   12510  */
   12511 static int
   12512 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12513 {
   12514 	int rv;
   12515 
   12516 	if (sc->sc_type == WM_T_80003)
   12517 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12518 	else
   12519 		rv = sc->phy.acquire(sc);
   12520 	if (rv != 0) {
   12521 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12522 		    __func__);
   12523 		return rv;
   12524 	}
   12525 
   12526 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12527 
   12528 	if (sc->sc_type == WM_T_80003)
   12529 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12530 	else
   12531 		sc->phy.release(sc);
   12532 
   12533 	return rv;
   12534 }
   12535 
   12536 static int
   12537 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12538 {
   12539 
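          	/*
          	 * Kumeran read protocol: write the register offset with the
          	 * read-enable (REN) bit set, flush, wait ~2us, then read the
          	 * data back from the low 16 bits of the same register.
          	 */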
   12540 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12541 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12542 	    KUMCTRLSTA_REN);
   12543 	CSR_WRITE_FLUSH(sc);
   12544 	delay(2);
   12545 
   12546 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12547 
   12548 	return 0;
   12549 }
   12550 
   12551 /*
   12552  * wm_kmrn_writereg:
   12553  *
   12554  *	Write a kumeran register
   12555  */
   12556 static int
   12557 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12558 {
   12559 	int rv;
   12560 
   12561 	if (sc->sc_type == WM_T_80003)
   12562 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12563 	else
   12564 		rv = sc->phy.acquire(sc);
   12565 	if (rv != 0) {
   12566 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12567 		    __func__);
   12568 		return rv;
   12569 	}
   12570 
   12571 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12572 
   12573 	if (sc->sc_type == WM_T_80003)
   12574 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12575 	else
   12576 		sc->phy.release(sc);
   12577 
   12578 	return rv;
   12579 }
   12580 
   12581 static int
   12582 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12583 {
   12584 
   12585 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12586 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12587 
   12588 	return 0;
   12589 }
   12590 
   12591 /*
    12592  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   12593  * This access method is different from IEEE MMD.
   12594  */
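          /*
           * The EMI access is indirect: write the EMI register number to
           * I82579_EMI_ADDR, then read or write the value through
           * I82579_EMI_DATA (both accessed at PHY address 2).
           */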
   12595 static int
   12596 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12597 {
   12598 	struct wm_softc *sc = device_private(dev);
   12599 	int rv;
   12600 
   12601 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12602 	if (rv != 0)
   12603 		return rv;
   12604 
   12605 	if (rd)
   12606 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12607 	else
   12608 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12609 	return rv;
   12610 }
   12611 
   12612 static int
   12613 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12614 {
   12615 
   12616 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12617 }
   12618 
   12619 static int
   12620 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12621 {
   12622 
   12623 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12624 }
   12625 
   12626 /* SGMII related */
   12627 
   12628 /*
   12629  * wm_sgmii_uses_mdio
   12630  *
   12631  * Check whether the transaction is to the internal PHY or the external
   12632  * MDIO interface. Return true if it's MDIO.
   12633  */
   12634 static bool
   12635 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12636 {
   12637 	uint32_t reg;
   12638 	bool ismdio = false;
   12639 
   12640 	switch (sc->sc_type) {
   12641 	case WM_T_82575:
   12642 	case WM_T_82576:
   12643 		reg = CSR_READ(sc, WMREG_MDIC);
   12644 		ismdio = ((reg & MDIC_DEST) != 0);
   12645 		break;
   12646 	case WM_T_82580:
   12647 	case WM_T_I350:
   12648 	case WM_T_I354:
   12649 	case WM_T_I210:
   12650 	case WM_T_I211:
   12651 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12652 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12653 		break;
   12654 	default:
   12655 		break;
   12656 	}
   12657 
   12658 	return ismdio;
   12659 }
   12660 
   12661 /* Setup internal SGMII PHY for SFP */
   12662 static void
   12663 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12664 {
   12665 	uint16_t id1, id2, phyreg;
   12666 	int i, rv;
   12667 
   12668 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12669 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12670 		return;
   12671 
   12672 	for (i = 0; i < MII_NPHY; i++) {
   12673 		sc->phy.no_errprint = true;
   12674 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12675 		if (rv != 0)
   12676 			continue;
   12677 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12678 		if (rv != 0)
   12679 			continue;
   12680 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12681 			continue;
   12682 		sc->phy.no_errprint = false;
   12683 
   12684 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12685 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12686 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12687 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12688 		break;
   12689 	}
    12691 }
   12692 
   12693 /*
   12694  * wm_sgmii_readreg:	[mii interface function]
   12695  *
   12696  *	Read a PHY register on the SGMII
   12697  * This could be handled by the PHY layer if we didn't have to lock the
   12698  * resource ...
   12699  */
   12700 static int
   12701 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12702 {
   12703 	struct wm_softc *sc = device_private(dev);
   12704 	int rv;
   12705 
   12706 	rv = sc->phy.acquire(sc);
   12707 	if (rv != 0) {
   12708 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12709 		return rv;
   12710 	}
   12711 
   12712 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12713 
   12714 	sc->phy.release(sc);
   12715 	return rv;
   12716 }
   12717 
   12718 static int
   12719 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12720 {
   12721 	struct wm_softc *sc = device_private(dev);
   12722 	uint32_t i2ccmd;
   12723 	int i, rv = 0;
   12724 
   12725 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12726 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12727 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12728 
   12729 	/* Poll the ready bit */
   12730 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12731 		delay(50);
   12732 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12733 		if (i2ccmd & I2CCMD_READY)
   12734 			break;
   12735 	}
   12736 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12737 		device_printf(dev, "I2CCMD Read did not complete\n");
   12738 		rv = ETIMEDOUT;
   12739 	}
   12740 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12741 		if (!sc->phy.no_errprint)
   12742 			device_printf(dev, "I2CCMD Error bit set\n");
   12743 		rv = EIO;
   12744 	}
   12745 
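          	/* Swap the data bytes back from I2C (big-endian) byte order. */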
   12746 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12747 
   12748 	return rv;
   12749 }
   12750 
   12751 /*
   12752  * wm_sgmii_writereg:	[mii interface function]
   12753  *
   12754  *	Write a PHY register on the SGMII.
   12755  * This could be handled by the PHY layer if we didn't have to lock the
   12756  * resource ...
   12757  */
   12758 static int
   12759 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12760 {
   12761 	struct wm_softc *sc = device_private(dev);
   12762 	int rv;
   12763 
   12764 	rv = sc->phy.acquire(sc);
   12765 	if (rv != 0) {
   12766 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12767 		return rv;
   12768 	}
   12769 
   12770 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12771 
   12772 	sc->phy.release(sc);
   12773 
   12774 	return rv;
   12775 }
   12776 
   12777 static int
   12778 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12779 {
   12780 	struct wm_softc *sc = device_private(dev);
   12781 	uint32_t i2ccmd;
   12782 	uint16_t swapdata;
   12783 	int rv = 0;
   12784 	int i;
   12785 
   12786 	/* Swap the data bytes for the I2C interface */
   12787 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12788 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12789 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12790 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12791 
   12792 	/* Poll the ready bit */
   12793 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12794 		delay(50);
   12795 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12796 		if (i2ccmd & I2CCMD_READY)
   12797 			break;
   12798 	}
   12799 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12800 		device_printf(dev, "I2CCMD Write did not complete\n");
   12801 		rv = ETIMEDOUT;
   12802 	}
   12803 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12804 		device_printf(dev, "I2CCMD Error bit set\n");
   12805 		rv = EIO;
   12806 	}
   12807 
   12808 	return rv;
   12809 }
   12810 
   12811 /* TBI related */
   12812 
   12813 static bool
   12814 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12815 {
   12816 	bool sig;
   12817 
   12818 	sig = ctrl & CTRL_SWDPIN(1);
   12819 
   12820 	/*
   12821 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12822 	 * detect a signal, 1 if they don't.
   12823 	 */
   12824 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12825 		sig = !sig;
   12826 
   12827 	return sig;
   12828 }
   12829 
   12830 /*
   12831  * wm_tbi_mediainit:
   12832  *
   12833  *	Initialize media for use on 1000BASE-X devices.
   12834  */
   12835 static void
   12836 wm_tbi_mediainit(struct wm_softc *sc)
   12837 {
   12838 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12839 	const char *sep = "";
   12840 
   12841 	if (sc->sc_type < WM_T_82543)
   12842 		sc->sc_tipg = TIPG_WM_DFLT;
   12843 	else
   12844 		sc->sc_tipg = TIPG_LG_DFLT;
   12845 
   12846 	sc->sc_tbi_serdes_anegticks = 5;
   12847 
   12848 	/* Initialize our media structures */
   12849 	sc->sc_mii.mii_ifp = ifp;
   12850 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12851 
   12852 	ifp->if_baudrate = IF_Gbps(1);
   12853 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12854 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12855 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12856 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12857 		    sc->sc_core_lock);
   12858 	} else {
   12859 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12860 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12861 	}
   12862 
   12863 	/*
   12864 	 * SWD Pins:
   12865 	 *
   12866 	 *	0 = Link LED (output)
   12867 	 *	1 = Loss Of Signal (input)
   12868 	 */
   12869 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12870 
   12871 	/* XXX Perhaps this is only for TBI */
   12872 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12873 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12874 
   12875 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12876 		sc->sc_ctrl &= ~CTRL_LRST;
   12877 
   12878 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12879 
   12880 #define	ADD(ss, mm, dd)							  \
   12881 do {									  \
   12882 	aprint_normal("%s%s", sep, ss);					  \
   12883 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12884 	sep = ", ";							  \
   12885 } while (/*CONSTCOND*/0)
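          /*
           * ADD() appends one entry to the ifmedia list and prints its name
           * on the attach line, comma-separated via "sep".
           */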
   12886 
   12887 	aprint_normal_dev(sc->sc_dev, "");
   12888 
   12889 	if (sc->sc_type == WM_T_I354) {
   12890 		uint32_t status;
   12891 
   12892 		status = CSR_READ(sc, WMREG_STATUS);
   12893 		if (((status & STATUS_2P5_SKU) != 0)
   12894 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    12895 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   12896 		} else
    12897 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12898 	} else if (sc->sc_type == WM_T_82545) {
   12899 		/* Only 82545 is LX (XXX except SFP) */
   12900 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12901 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12902 	} else if (sc->sc_sfptype != 0) {
   12903 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12904 		switch (sc->sc_sfptype) {
   12905 		default:
   12906 		case SFF_SFP_ETH_FLAGS_1000SX:
   12907 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12908 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12909 			break;
   12910 		case SFF_SFP_ETH_FLAGS_1000LX:
   12911 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12912 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12913 			break;
   12914 		case SFF_SFP_ETH_FLAGS_1000CX:
   12915 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12916 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12917 			break;
   12918 		case SFF_SFP_ETH_FLAGS_1000T:
   12919 			ADD("1000baseT", IFM_1000_T, 0);
   12920 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12921 			break;
   12922 		case SFF_SFP_ETH_FLAGS_100FX:
   12923 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12924 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12925 			break;
   12926 		}
   12927 	} else {
   12928 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12929 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12930 	}
   12931 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12932 	aprint_normal("\n");
   12933 
   12934 #undef ADD
   12935 
   12936 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12937 }
   12938 
   12939 /*
   12940  * wm_tbi_mediachange:	[ifmedia interface function]
   12941  *
   12942  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12943  */
   12944 static int
   12945 wm_tbi_mediachange(struct ifnet *ifp)
   12946 {
   12947 	struct wm_softc *sc = ifp->if_softc;
   12948 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12949 	uint32_t status, ctrl;
   12950 	bool signal;
   12951 	int i;
   12952 
   12953 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12954 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12955 		/* XXX need some work for >= 82571 and < 82575 */
   12956 		if (sc->sc_type < WM_T_82575)
   12957 			return 0;
   12958 	}
   12959 
   12960 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12961 	    || (sc->sc_type >= WM_T_82575))
   12962 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12963 
   12964 	sc->sc_ctrl &= ~CTRL_LRST;
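          	/*
          	 * Build the transmit configuration word: TXCW advertises our
          	 * duplex abilities (and, below, pause) to the link partner,
          	 * with ANE enabling 802.3z autonegotiation.
          	 */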
   12965 	sc->sc_txcw = TXCW_ANE;
   12966 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12967 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12968 	else if (ife->ifm_media & IFM_FDX)
   12969 		sc->sc_txcw |= TXCW_FD;
   12970 	else
   12971 		sc->sc_txcw |= TXCW_HD;
   12972 
   12973 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12974 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12975 
    12976 	DPRINTF(sc, WM_DEBUG_LINK,
          	    ("%s: sc_txcw = 0x%x after autoneg check\n",
   12977 		device_xname(sc->sc_dev), sc->sc_txcw));
   12978 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12979 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12980 	CSR_WRITE_FLUSH(sc);
   12981 	delay(1000);
   12982 
   12983 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12984 	signal = wm_tbi_havesignal(sc, ctrl);
   12985 
   12986 	DPRINTF(sc, WM_DEBUG_LINK,
   12987 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12988 
   12989 	if (signal) {
   12990 		/* Have signal; wait for the link to come up. */
   12991 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12992 			delay(10000);
   12993 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12994 				break;
   12995 		}
   12996 
   12997 		DPRINTF(sc, WM_DEBUG_LINK,
   12998 		    ("%s: i = %d after waiting for link\n",
   12999 			device_xname(sc->sc_dev), i));
   13000 
   13001 		status = CSR_READ(sc, WMREG_STATUS);
   13002 		DPRINTF(sc, WM_DEBUG_LINK,
   13003 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13004 			__PRIxBIT "\n",
   13005 			device_xname(sc->sc_dev), status, STATUS_LU));
   13006 		if (status & STATUS_LU) {
   13007 			/* Link is up. */
   13008 			DPRINTF(sc, WM_DEBUG_LINK,
   13009 			    ("%s: LINK: set media -> link up %s\n",
   13010 				device_xname(sc->sc_dev),
   13011 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13012 
   13013 			/*
   13014 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   13015 			 * so we should update sc->sc_ctrl
   13016 			 */
   13017 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13018 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13019 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13020 			if (status & STATUS_FD)
   13021 				sc->sc_tctl |=
   13022 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13023 			else
   13024 				sc->sc_tctl |=
   13025 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13026 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13027 				sc->sc_fcrtl |= FCRTL_XONE;
   13028 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13029 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13030 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13031 			sc->sc_tbi_linkup = 1;
   13032 		} else {
   13033 			if (i == WM_LINKUP_TIMEOUT)
   13034 				wm_check_for_link(sc);
   13035 			/* Link is down. */
   13036 			DPRINTF(sc, WM_DEBUG_LINK,
   13037 			    ("%s: LINK: set media -> link down\n",
   13038 				device_xname(sc->sc_dev)));
   13039 			sc->sc_tbi_linkup = 0;
   13040 		}
   13041 	} else {
   13042 		DPRINTF(sc, WM_DEBUG_LINK,
   13043 		    ("%s: LINK: set media -> no signal\n",
   13044 			device_xname(sc->sc_dev)));
   13045 		sc->sc_tbi_linkup = 0;
   13046 	}
   13047 
   13048 	wm_tbi_serdes_set_linkled(sc);
   13049 
   13050 	return 0;
   13051 }
   13052 
   13053 /*
   13054  * wm_tbi_mediastatus:	[ifmedia interface function]
   13055  *
   13056  *	Get the current interface media status on a 1000BASE-X device.
   13057  */
   13058 static void
   13059 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13060 {
   13061 	struct wm_softc *sc = ifp->if_softc;
   13062 	uint32_t ctrl, status;
   13063 
   13064 	ifmr->ifm_status = IFM_AVALID;
   13065 	ifmr->ifm_active = IFM_ETHER;
   13066 
   13067 	status = CSR_READ(sc, WMREG_STATUS);
   13068 	if ((status & STATUS_LU) == 0) {
   13069 		ifmr->ifm_active |= IFM_NONE;
   13070 		return;
   13071 	}
   13072 
   13073 	ifmr->ifm_status |= IFM_ACTIVE;
   13074 	/* Only 82545 is LX */
   13075 	if (sc->sc_type == WM_T_82545)
   13076 		ifmr->ifm_active |= IFM_1000_LX;
   13077 	else
   13078 		ifmr->ifm_active |= IFM_1000_SX;
   13079 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13080 		ifmr->ifm_active |= IFM_FDX;
   13081 	else
   13082 		ifmr->ifm_active |= IFM_HDX;
   13083 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13084 	if (ctrl & CTRL_RFCE)
   13085 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13086 	if (ctrl & CTRL_TFCE)
   13087 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13088 }
   13089 
   13090 /* XXX TBI only */
   13091 static int
   13092 wm_check_for_link(struct wm_softc *sc)
   13093 {
   13094 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13095 	uint32_t rxcw;
   13096 	uint32_t ctrl;
   13097 	uint32_t status;
   13098 	bool signal;
   13099 
   13100 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13101 		device_xname(sc->sc_dev), __func__));
   13102 
   13103 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13104 		/* XXX need some work for >= 82571 */
   13105 		if (sc->sc_type >= WM_T_82571) {
   13106 			sc->sc_tbi_linkup = 1;
   13107 			return 0;
   13108 		}
   13109 	}
   13110 
   13111 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13112 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13113 	status = CSR_READ(sc, WMREG_STATUS);
   13114 	signal = wm_tbi_havesignal(sc, ctrl);
   13115 
   13116 	DPRINTF(sc, WM_DEBUG_LINK,
   13117 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13118 		device_xname(sc->sc_dev), __func__, signal,
   13119 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13120 
   13121 	/*
   13122 	 * SWDPIN   LU RXCW
   13123 	 *	0    0	  0
   13124 	 *	0    0	  1	(should not happen)
   13125 	 *	0    1	  0	(should not happen)
   13126 	 *	0    1	  1	(should not happen)
   13127 	 *	1    0	  0	Disable autonego and force linkup
   13128 	 *	1    0	  1	got /C/ but not linkup yet
   13129 	 *	1    1	  0	(linkup)
   13130 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13131 	 *
   13132 	 */
   13133 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13134 		DPRINTF(sc, WM_DEBUG_LINK,
   13135 		    ("%s: %s: force linkup and fullduplex\n",
   13136 			device_xname(sc->sc_dev), __func__));
   13137 		sc->sc_tbi_linkup = 0;
   13138 		/* Disable auto-negotiation in the TXCW register */
   13139 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13140 
   13141 		/*
   13142 		 * Force link-up and also force full-duplex.
   13143 		 *
    13144 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
    13145 		 * automatically, so we should update sc->sc_ctrl.
   13146 		 */
   13147 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13148 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13149 	} else if (((status & STATUS_LU) != 0)
   13150 	    && ((rxcw & RXCW_C) != 0)
   13151 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13152 		sc->sc_tbi_linkup = 1;
   13153 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13154 			device_xname(sc->sc_dev), __func__));
   13155 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13156 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13157 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13158 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13159 			device_xname(sc->sc_dev), __func__));
   13160 	} else {
   13161 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13162 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13163 			status));
   13164 	}
   13165 
   13166 	return 0;
   13167 }
   13168 
   13169 /*
   13170  * wm_tbi_tick:
   13171  *
   13172  *	Check the link on TBI devices.
   13173  *	This function acts as mii_tick().
   13174  */
   13175 static void
   13176 wm_tbi_tick(struct wm_softc *sc)
   13177 {
   13178 	struct mii_data *mii = &sc->sc_mii;
   13179 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13180 	uint32_t status;
   13181 
   13182 	KASSERT(mutex_owned(sc->sc_core_lock));
   13183 
   13184 	status = CSR_READ(sc, WMREG_STATUS);
   13185 
   13186 	/* XXX is this needed? */
   13187 	(void)CSR_READ(sc, WMREG_RXCW);
   13188 	(void)CSR_READ(sc, WMREG_CTRL);
   13189 
   13190 	/* set link status */
   13191 	if ((status & STATUS_LU) == 0) {
   13192 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13193 			device_xname(sc->sc_dev)));
   13194 		sc->sc_tbi_linkup = 0;
   13195 	} else if (sc->sc_tbi_linkup == 0) {
   13196 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13197 			device_xname(sc->sc_dev),
   13198 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13199 		sc->sc_tbi_linkup = 1;
   13200 		sc->sc_tbi_serdes_ticks = 0;
   13201 	}
   13202 
   13203 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13204 		goto setled;
   13205 
   13206 	if ((status & STATUS_LU) == 0) {
   13207 		sc->sc_tbi_linkup = 0;
   13208 		/* If the timer expired, retry autonegotiation */
   13209 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13210 		    && (++sc->sc_tbi_serdes_ticks
   13211 			>= sc->sc_tbi_serdes_anegticks)) {
   13212 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13213 				device_xname(sc->sc_dev), __func__));
   13214 			sc->sc_tbi_serdes_ticks = 0;
   13215 			/*
   13216 			 * Reset the link, and let autonegotiation do
   13217 			 * its thing
   13218 			 */
   13219 			sc->sc_ctrl |= CTRL_LRST;
   13220 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13221 			CSR_WRITE_FLUSH(sc);
   13222 			delay(1000);
   13223 			sc->sc_ctrl &= ~CTRL_LRST;
   13224 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13225 			CSR_WRITE_FLUSH(sc);
   13226 			delay(1000);
   13227 			CSR_WRITE(sc, WMREG_TXCW,
   13228 			    sc->sc_txcw & ~TXCW_ANE);
   13229 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13230 		}
   13231 	}
   13232 
   13233 setled:
   13234 	wm_tbi_serdes_set_linkled(sc);
   13235 }
   13236 
   13237 /* SERDES related */
   13238 static void
   13239 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13240 {
   13241 	uint32_t reg;
   13242 
   13243 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13244 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13245 		return;
   13246 
   13247 	/* Enable PCS to turn on link */
   13248 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13249 	reg |= PCS_CFG_PCS_EN;
   13250 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13251 
   13252 	/* Power up the laser */
   13253 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13254 	reg &= ~CTRL_EXT_SWDPIN(3);
   13255 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13256 
   13257 	/* Flush the write to verify completion */
   13258 	CSR_WRITE_FLUSH(sc);
   13259 	delay(1000);
   13260 }
   13261 
   13262 static int
   13263 wm_serdes_mediachange(struct ifnet *ifp)
   13264 {
   13265 	struct wm_softc *sc = ifp->if_softc;
   13266 	bool pcs_autoneg = true; /* XXX */
   13267 	uint32_t ctrl_ext, pcs_lctl, reg;
   13268 
   13269 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13270 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13271 		return 0;
   13272 
   13273 	/* XXX Currently, this function is not called on 8257[12] */
   13274 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13275 	    || (sc->sc_type >= WM_T_82575))
   13276 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13277 
   13278 	/* Power on the sfp cage if present */
   13279 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13280 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13281 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13282 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13283 
   13284 	sc->sc_ctrl |= CTRL_SLU;
   13285 
   13286 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13287 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13288 
   13289 		reg = CSR_READ(sc, WMREG_CONNSW);
   13290 		reg |= CONNSW_ENRGSRC;
   13291 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13292 	}
   13293 
   13294 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13295 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13296 	case CTRL_EXT_LINK_MODE_SGMII:
   13297 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13298 		pcs_autoneg = true;
   13299 		/* Autoneg time out should be disabled for SGMII mode */
   13300 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13301 		break;
   13302 	case CTRL_EXT_LINK_MODE_1000KX:
   13303 		pcs_autoneg = false;
   13304 		/* FALLTHROUGH */
   13305 	default:
   13306 		if ((sc->sc_type == WM_T_82575)
   13307 		    || (sc->sc_type == WM_T_82576)) {
   13308 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13309 				pcs_autoneg = false;
   13310 		}
   13311 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13312 		    | CTRL_FRCFDX;
   13313 
   13314 		/* Set speed of 1000/Full if speed/duplex is forced */
   13315 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13316 	}
   13317 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13318 
   13319 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13320 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13321 
   13322 	if (pcs_autoneg) {
   13323 		/* Set PCS register for autoneg */
   13324 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13325 
   13326 		/* Disable force flow control for autoneg */
   13327 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13328 
   13329 		/* Configure flow control advertisement for autoneg */
   13330 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13331 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13332 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13333 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13334 	} else
   13335 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13336 
   13337 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13338 
   13339 	return 0;
   13340 }
   13341 
   13342 static void
   13343 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13344 {
   13345 	struct wm_softc *sc = ifp->if_softc;
   13346 	struct mii_data *mii = &sc->sc_mii;
   13347 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13348 	uint32_t pcs_adv, pcs_lpab, reg;
   13349 
   13350 	ifmr->ifm_status = IFM_AVALID;
   13351 	ifmr->ifm_active = IFM_ETHER;
   13352 
   13353 	/* Check PCS */
   13354 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13355 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13356 		ifmr->ifm_active |= IFM_NONE;
   13357 		sc->sc_tbi_linkup = 0;
   13358 		goto setled;
   13359 	}
   13360 
   13361 	sc->sc_tbi_linkup = 1;
   13362 	ifmr->ifm_status |= IFM_ACTIVE;
   13363 	if (sc->sc_type == WM_T_I354) {
   13364 		uint32_t status;
   13365 
   13366 		status = CSR_READ(sc, WMREG_STATUS);
   13367 		if (((status & STATUS_2P5_SKU) != 0)
   13368 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13369 			ifmr->ifm_active |= IFM_2500_KX;
   13370 		} else
   13371 			ifmr->ifm_active |= IFM_1000_KX;
   13372 	} else {
   13373 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13374 		case PCS_LSTS_SPEED_10:
   13375 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13376 			break;
   13377 		case PCS_LSTS_SPEED_100:
   13378 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13379 			break;
   13380 		case PCS_LSTS_SPEED_1000:
   13381 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13382 			break;
   13383 		default:
   13384 			device_printf(sc->sc_dev, "Unknown speed\n");
   13385 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13386 			break;
   13387 		}
   13388 	}
   13389 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13390 	if ((reg & PCS_LSTS_FDX) != 0)
   13391 		ifmr->ifm_active |= IFM_FDX;
   13392 	else
   13393 		ifmr->ifm_active |= IFM_HDX;
   13394 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13395 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13396 		/* Check flow */
   13397 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13398 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13399 			DPRINTF(sc, WM_DEBUG_LINK,
   13400 			    ("XXX LINKOK but not ACOMP\n"));
   13401 			goto setled;
   13402 		}
   13403 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13404 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13405 		DPRINTF(sc, WM_DEBUG_LINK,
   13406 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
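          		/*
          		 * Resolve TX/RX pause from the advertised and link
          		 * partner ability words, following the standard pause
          		 * resolution rules (cf. 802.3 annex 28B).
          		 */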
   13407 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13408 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13409 			mii->mii_media_active |= IFM_FLOW
   13410 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13411 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13412 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13413 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13414 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13415 			mii->mii_media_active |= IFM_FLOW
   13416 			    | IFM_ETH_TXPAUSE;
   13417 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13418 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13419 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13420 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13421 			mii->mii_media_active |= IFM_FLOW
   13422 			    | IFM_ETH_RXPAUSE;
   13423 		}
   13424 	}
   13425 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13426 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13427 setled:
   13428 	wm_tbi_serdes_set_linkled(sc);
   13429 }
   13430 
   13431 /*
   13432  * wm_serdes_tick:
   13433  *
   13434  *	Check the link on serdes devices.
   13435  */
   13436 static void
   13437 wm_serdes_tick(struct wm_softc *sc)
   13438 {
   13439 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13440 	struct mii_data *mii = &sc->sc_mii;
   13441 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13442 	uint32_t reg;
   13443 
   13444 	KASSERT(mutex_owned(sc->sc_core_lock));
   13445 
   13446 	mii->mii_media_status = IFM_AVALID;
   13447 	mii->mii_media_active = IFM_ETHER;
   13448 
   13449 	/* Check PCS */
   13450 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13451 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13452 		mii->mii_media_status |= IFM_ACTIVE;
   13453 		sc->sc_tbi_linkup = 1;
   13454 		sc->sc_tbi_serdes_ticks = 0;
   13455 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13456 		if ((reg & PCS_LSTS_FDX) != 0)
   13457 			mii->mii_media_active |= IFM_FDX;
   13458 		else
   13459 			mii->mii_media_active |= IFM_HDX;
   13460 	} else {
   13461 		mii->mii_media_status |= IFM_NONE;
   13462 		sc->sc_tbi_linkup = 0;
   13463 		/* If the timer expired, retry autonegotiation */
   13464 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13465 		    && (++sc->sc_tbi_serdes_ticks
   13466 			>= sc->sc_tbi_serdes_anegticks)) {
   13467 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13468 				device_xname(sc->sc_dev), __func__));
   13469 			sc->sc_tbi_serdes_ticks = 0;
   13470 			/* XXX */
   13471 			wm_serdes_mediachange(ifp);
   13472 		}
   13473 	}
   13474 
   13475 	wm_tbi_serdes_set_linkled(sc);
   13476 }
   13477 
   13478 /* SFP related */
   13479 
   13480 static int
   13481 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13482 {
   13483 	uint32_t i2ccmd;
   13484 	int i;
   13485 
   13486 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13487 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13488 
   13489 	/* Poll the ready bit */
   13490 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13491 		delay(50);
   13492 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13493 		if (i2ccmd & I2CCMD_READY)
   13494 			break;
   13495 	}
   13496 	if ((i2ccmd & I2CCMD_READY) == 0)
   13497 		return -1;
   13498 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13499 		return -1;
   13500 
   13501 	*data = i2ccmd & 0x00ff;
   13502 
   13503 	return 0;
   13504 }
   13505 
   13506 static uint32_t
   13507 wm_sfp_get_media_type(struct wm_softc *sc)
   13508 {
   13509 	uint32_t ctrl_ext;
   13510 	uint8_t val = 0;
   13511 	int timeout = 3;
   13512 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13513 	int rv = -1;
   13514 
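          	/*
          	 * Enable the I2C interface pins so that the SFP module's
          	 * SFF ID EEPROM can be read through I2CCMD.
          	 */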
   13515 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13516 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13517 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13518 	CSR_WRITE_FLUSH(sc);
   13519 
   13520 	/* Read SFP module data */
   13521 	while (timeout) {
   13522 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13523 		if (rv == 0)
   13524 			break;
   13525 		delay(100*1000); /* XXX too big */
   13526 		timeout--;
   13527 	}
   13528 	if (rv != 0)
   13529 		goto out;
   13530 
   13531 	switch (val) {
   13532 	case SFF_SFP_ID_SFF:
   13533 		aprint_normal_dev(sc->sc_dev,
   13534 		    "Module/Connector soldered to board\n");
   13535 		break;
   13536 	case SFF_SFP_ID_SFP:
   13537 		sc->sc_flags |= WM_F_SFP;
   13538 		break;
   13539 	case SFF_SFP_ID_UNKNOWN:
   13540 		goto out;
   13541 	default:
   13542 		break;
   13543 	}
   13544 
   13545 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13546 	if (rv != 0)
   13547 		goto out;
   13548 
   13549 	sc->sc_sfptype = val;
   13550 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13551 		mediatype = WM_MEDIATYPE_SERDES;
   13552 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13553 		sc->sc_flags |= WM_F_SGMII;
   13554 		mediatype = WM_MEDIATYPE_COPPER;
   13555 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13556 		sc->sc_flags |= WM_F_SGMII;
   13557 		mediatype = WM_MEDIATYPE_SERDES;
   13558 	} else {
   13559 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13560 		    __func__, sc->sc_sfptype);
   13561 		sc->sc_sfptype = 0; /* XXX unknown */
   13562 	}
   13563 
   13564 out:
   13565 	/* Restore I2C interface setting */
   13566 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13567 
   13568 	return mediatype;
   13569 }
   13570 
   13571 /*
   13572  * NVM related.
   13573  * Microwire, SPI (w/wo EERD) and Flash.
   13574  */
   13575 
   13576 /* Both spi and uwire */
   13577 
   13578 /*
   13579  * wm_eeprom_sendbits:
   13580  *
   13581  *	Send a series of bits to the EEPROM.
   13582  */
   13583 static void
   13584 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13585 {
   13586 	uint32_t reg;
   13587 	int x;
   13588 
   13589 	reg = CSR_READ(sc, WMREG_EECD);
   13590 
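          	/*
          	 * Bit-bang each bit, MSB first, onto DI, pulsing SK high and
          	 * back low with ~2us of settle time per phase.
          	 */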
   13591 	for (x = nbits; x > 0; x--) {
   13592 		if (bits & (1U << (x - 1)))
   13593 			reg |= EECD_DI;
   13594 		else
   13595 			reg &= ~EECD_DI;
   13596 		CSR_WRITE(sc, WMREG_EECD, reg);
   13597 		CSR_WRITE_FLUSH(sc);
   13598 		delay(2);
   13599 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13600 		CSR_WRITE_FLUSH(sc);
   13601 		delay(2);
   13602 		CSR_WRITE(sc, WMREG_EECD, reg);
   13603 		CSR_WRITE_FLUSH(sc);
   13604 		delay(2);
   13605 	}
   13606 }
   13607 
   13608 /*
   13609  * wm_eeprom_recvbits:
   13610  *
   13611  *	Receive a series of bits from the EEPROM.
   13612  */
   13613 static void
   13614 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13615 {
   13616 	uint32_t reg, val;
   13617 	int x;
   13618 
   13619 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13620 
   13621 	val = 0;
   13622 	for (x = nbits; x > 0; x--) {
   13623 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13624 		CSR_WRITE_FLUSH(sc);
   13625 		delay(2);
   13626 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13627 			val |= (1U << (x - 1));
   13628 		CSR_WRITE(sc, WMREG_EECD, reg);
   13629 		CSR_WRITE_FLUSH(sc);
   13630 		delay(2);
   13631 	}
   13632 	*valp = val;
   13633 }
   13634 
   13635 /* Microwire */
   13636 
   13637 /*
   13638  * wm_nvm_read_uwire:
   13639  *
   13640  *	Read a word from the EEPROM using the MicroWire protocol.
   13641  */
   13642 static int
   13643 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13644 {
   13645 	uint32_t reg, val;
   13646 	int i, rv;
   13647 
   13648 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13649 		device_xname(sc->sc_dev), __func__));
   13650 
   13651 	rv = sc->nvm.acquire(sc);
   13652 	if (rv != 0)
   13653 		return rv;
   13654 
   13655 	for (i = 0; i < wordcnt; i++) {
   13656 		/* Clear SK and DI. */
   13657 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13658 		CSR_WRITE(sc, WMREG_EECD, reg);
   13659 
   13660 		/*
   13661 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13662 		 * and Xen.
   13663 		 *
   13664 		 * We use this workaround only for 82540 because qemu's
   13665 		 * e1000 act as 82540.
   13666 		 */
   13667 		if (sc->sc_type == WM_T_82540) {
   13668 			reg |= EECD_SK;
   13669 			CSR_WRITE(sc, WMREG_EECD, reg);
   13670 			reg &= ~EECD_SK;
   13671 			CSR_WRITE(sc, WMREG_EECD, reg);
   13672 			CSR_WRITE_FLUSH(sc);
   13673 			delay(2);
   13674 		}
   13675 		/* XXX: end of workaround */
   13676 
   13677 		/* Set CHIP SELECT. */
   13678 		reg |= EECD_CS;
   13679 		CSR_WRITE(sc, WMREG_EECD, reg);
   13680 		CSR_WRITE_FLUSH(sc);
   13681 		delay(2);
   13682 
   13683 		/* Shift in the READ command. */
   13684 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13685 
   13686 		/* Shift in address. */
   13687 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13688 
   13689 		/* Shift out the data. */
   13690 		wm_eeprom_recvbits(sc, &val, 16);
   13691 		data[i] = val & 0xffff;
   13692 
   13693 		/* Clear CHIP SELECT. */
   13694 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13695 		CSR_WRITE(sc, WMREG_EECD, reg);
   13696 		CSR_WRITE_FLUSH(sc);
   13697 		delay(2);
   13698 	}
   13699 
   13700 	sc->nvm.release(sc);
   13701 	return 0;
   13702 }
   13703 
   13704 /* SPI */
   13705 
   13706 /*
   13707  * Set SPI and FLASH related information from the EECD register.
   13708  * For 82541 and 82547, the word size is taken from EEPROM.
   13709  */
   13710 static int
   13711 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13712 {
   13713 	int size;
   13714 	uint32_t reg;
   13715 	uint16_t data;
   13716 
   13717 	reg = CSR_READ(sc, WMREG_EECD);
   13718 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13719 
   13720 	/* Read the size of NVM from EECD by default */
   13721 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13722 	switch (sc->sc_type) {
   13723 	case WM_T_82541:
   13724 	case WM_T_82541_2:
   13725 	case WM_T_82547:
   13726 	case WM_T_82547_2:
   13727 		/* Set dummy value to access EEPROM */
   13728 		sc->sc_nvm_wordsize = 64;
   13729 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13730 			aprint_error_dev(sc->sc_dev,
   13731 			    "%s: failed to read EEPROM size\n", __func__);
   13732 		}
   13733 		reg = data;
   13734 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13735 		if (size == 0)
   13736 			size = 6; /* 64 word size */
   13737 		else
   13738 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13739 		break;
   13740 	case WM_T_80003:
   13741 	case WM_T_82571:
   13742 	case WM_T_82572:
   13743 	case WM_T_82573: /* SPI case */
   13744 	case WM_T_82574: /* SPI case */
   13745 	case WM_T_82583: /* SPI case */
   13746 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13747 		if (size > 14)
   13748 			size = 14;
   13749 		break;
   13750 	case WM_T_82575:
   13751 	case WM_T_82576:
   13752 	case WM_T_82580:
   13753 	case WM_T_I350:
   13754 	case WM_T_I354:
   13755 	case WM_T_I210:
   13756 	case WM_T_I211:
   13757 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13758 		if (size > 15)
   13759 			size = 15;
   13760 		break;
   13761 	default:
   13762 		aprint_error_dev(sc->sc_dev,
    13763 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    13764 		return -1;
   13766 	}
   13767 
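          	/*
          	 * "size" is now log2 of the NVM word count, e.g. a value of 8
          	 * yields a 256-word NVM.
          	 */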
   13768 	sc->sc_nvm_wordsize = 1 << size;
   13769 
   13770 	return 0;
   13771 }
   13772 
   13773 /*
   13774  * wm_nvm_ready_spi:
   13775  *
   13776  *	Wait for a SPI EEPROM to be ready for commands.
   13777  */
   13778 static int
   13779 wm_nvm_ready_spi(struct wm_softc *sc)
   13780 {
   13781 	uint32_t val;
   13782 	int usec;
   13783 
   13784 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13785 		device_xname(sc->sc_dev), __func__));
   13786 
   13787 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13788 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13789 		wm_eeprom_recvbits(sc, &val, 8);
   13790 		if ((val & SPI_SR_RDY) == 0)
   13791 			break;
   13792 	}
   13793 	if (usec >= SPI_MAX_RETRIES) {
    13794 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   13795 		return -1;
   13796 	}
   13797 	return 0;
   13798 }
   13799 
   13800 /*
   13801  * wm_nvm_read_spi:
   13802  *
    13803  *	Read a word from the EEPROM using the SPI protocol.
   13804  */
   13805 static int
   13806 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13807 {
   13808 	uint32_t reg, val;
   13809 	int i;
   13810 	uint8_t opc;
   13811 	int rv;
   13812 
   13813 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13814 		device_xname(sc->sc_dev), __func__));
   13815 
   13816 	rv = sc->nvm.acquire(sc);
   13817 	if (rv != 0)
   13818 		return rv;
   13819 
   13820 	/* Clear SK and CS. */
   13821 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13822 	CSR_WRITE(sc, WMREG_EECD, reg);
   13823 	CSR_WRITE_FLUSH(sc);
   13824 	delay(2);
   13825 
   13826 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13827 		goto out;
   13828 
   13829 	/* Toggle CS to flush commands. */
   13830 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13831 	CSR_WRITE_FLUSH(sc);
   13832 	delay(2);
   13833 	CSR_WRITE(sc, WMREG_EECD, reg);
   13834 	CSR_WRITE_FLUSH(sc);
   13835 	delay(2);
   13836 
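          	/*
          	 * Small SPI parts with 8-bit addressing carry the 9th address
          	 * bit in the opcode (SPI_OPC_A8) for words at byte addresses
          	 * 0x100 and above, i.e. word 128 and up.
          	 */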
   13837 	opc = SPI_OPC_READ;
   13838 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13839 		opc |= SPI_OPC_A8;
   13840 
   13841 	wm_eeprom_sendbits(sc, opc, 8);
   13842 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13843 
   13844 	for (i = 0; i < wordcnt; i++) {
   13845 		wm_eeprom_recvbits(sc, &val, 16);
   13846 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13847 	}
   13848 
   13849 	/* Raise CS and clear SK. */
   13850 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13851 	CSR_WRITE(sc, WMREG_EECD, reg);
   13852 	CSR_WRITE_FLUSH(sc);
   13853 	delay(2);
   13854 
   13855 out:
   13856 	sc->nvm.release(sc);
   13857 	return rv;
   13858 }
   13859 
    13860 /* Reading via the EERD register */
   13861 
   13862 static int
   13863 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13864 {
   13865 	uint32_t attempts = 100000;
   13866 	uint32_t i, reg = 0;
   13867 	int32_t done = -1;
   13868 
   13869 	for (i = 0; i < attempts; i++) {
   13870 		reg = CSR_READ(sc, rw);
   13871 
   13872 		if (reg & EERD_DONE) {
   13873 			done = 0;
   13874 			break;
   13875 		}
   13876 		delay(5);
   13877 	}
   13878 
   13879 	return done;
   13880 }
   13881 
   13882 static int
   13883 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13884 {
   13885 	int i, eerd = 0;
   13886 	int rv;
   13887 
   13888 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13889 		device_xname(sc->sc_dev), __func__));
   13890 
   13891 	rv = sc->nvm.acquire(sc);
   13892 	if (rv != 0)
   13893 		return rv;
   13894 
   13895 	for (i = 0; i < wordcnt; i++) {
   13896 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13897 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13898 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13899 		if (rv != 0) {
   13900 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13901 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13902 			break;
   13903 		}
   13904 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13905 	}
   13906 
   13907 	sc->nvm.release(sc);
   13908 	return rv;
   13909 }
   13910 
   13911 /* Flash */
   13912 
   13913 static int
   13914 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13915 {
   13916 	uint32_t eecd;
   13917 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13918 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13919 	uint32_t nvm_dword = 0;
   13920 	uint8_t sig_byte = 0;
   13921 	int rv;
   13922 
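          	/*
          	 * The bank signature lives in the high byte of the
          	 * ICH_NVM_SIG_WORD word of each bank; a bank is valid when
          	 * (sig & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE.
          	 */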
   13923 	switch (sc->sc_type) {
   13924 	case WM_T_PCH_SPT:
   13925 	case WM_T_PCH_CNP:
   13926 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13927 		act_offset = ICH_NVM_SIG_WORD * 2;
   13928 
   13929 		/* Set bank to 0 in case flash read fails. */
   13930 		*bank = 0;
   13931 
   13932 		/* Check bank 0 */
   13933 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13934 		if (rv != 0)
   13935 			return rv;
   13936 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13937 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13938 			*bank = 0;
   13939 			return 0;
   13940 		}
   13941 
   13942 		/* Check bank 1 */
    13943 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13944 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13945 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13946 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13947 			*bank = 1;
   13948 			return 0;
   13949 		}
   13950 		aprint_error_dev(sc->sc_dev,
   13951 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13952 		return -1;
   13953 	case WM_T_ICH8:
   13954 	case WM_T_ICH9:
   13955 		eecd = CSR_READ(sc, WMREG_EECD);
   13956 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13957 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13958 			return 0;
   13959 		}
   13960 		/* FALLTHROUGH */
   13961 	default:
   13962 		/* Default to 0 */
   13963 		*bank = 0;
   13964 
   13965 		/* Check bank 0 */
   13966 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13967 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13968 			*bank = 0;
   13969 			return 0;
   13970 		}
   13971 
   13972 		/* Check bank 1 */
   13973 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13974 		    &sig_byte);
   13975 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13976 			*bank = 1;
   13977 			return 0;
   13978 		}
   13979 	}
   13980 
   13981 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13982 		device_xname(sc->sc_dev)));
   13983 	return -1;
   13984 }
   13985 
   13986 /******************************************************************************
   13987  * This function does initial flash setup so that a new read/write/erase cycle
   13988  * can be started.
   13989  *
   13990  * sc - The pointer to the hw structure
   13991  ****************************************************************************/
   13992 static int32_t
   13993 wm_ich8_cycle_init(struct wm_softc *sc)
   13994 {
   13995 	uint16_t hsfsts;
   13996 	int32_t error = 1;
   13997 	int32_t i     = 0;
   13998 
   13999 	if (sc->sc_type >= WM_T_PCH_SPT)
   14000 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14001 	else
   14002 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14003 
    14004 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   14005 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14006 		return error;
   14007 
    14008 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   14010 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14011 
   14012 	if (sc->sc_type >= WM_T_PCH_SPT)
   14013 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14014 	else
   14015 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14016 
    14017 	/*
    14018 	 * Ideally the hardware would give us either a cycle-in-progress
    14019 	 * bit to check against before starting a new cycle, or an FDONE
    14020 	 * bit that reads as 1 after hardware reset, which could then be
    14021 	 * used to tell whether a cycle is in progress or has completed.
    14022 	 * We would also want some software semaphore mechanism guarding
    14023 	 * FDONE or the cycle-in-progress bit, so that two threads'
    14024 	 * accesses to those bits are serialized and two threads cannot
    14025 	 * start a cycle at the same time.
    14026 	 */
   14027 
   14028 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14029 		/*
   14030 		 * There is no cycle running at present, so we can start a
   14031 		 * cycle
   14032 		 */
   14033 
   14034 		/* Begin by setting Flash Cycle Done. */
   14035 		hsfsts |= HSFSTS_DONE;
   14036 		if (sc->sc_type >= WM_T_PCH_SPT)
   14037 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14038 			    hsfsts & 0xffffUL);
   14039 		else
   14040 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14041 		error = 0;
   14042 	} else {
   14043 		/*
    14044 		 * Otherwise poll for some time so the current cycle has a
   14045 		 * chance to end before giving up.
   14046 		 */
   14047 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14048 			if (sc->sc_type >= WM_T_PCH_SPT)
   14049 				hsfsts = ICH8_FLASH_READ32(sc,
   14050 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14051 			else
   14052 				hsfsts = ICH8_FLASH_READ16(sc,
   14053 				    ICH_FLASH_HSFSTS);
   14054 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14055 				error = 0;
   14056 				break;
   14057 			}
   14058 			delay(1);
   14059 		}
   14060 		if (error == 0) {
   14061 			/*
    14062 			 * The previous cycle finished before the timeout;
    14063 			 * now set the Flash Cycle Done bit.
   14064 			 */
   14065 			hsfsts |= HSFSTS_DONE;
   14066 			if (sc->sc_type >= WM_T_PCH_SPT)
   14067 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14068 				    hsfsts & 0xffffUL);
   14069 			else
   14070 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14071 				    hsfsts);
   14072 		}
   14073 	}
   14074 	return error;
   14075 }
   14076 
   14077 /******************************************************************************
   14078  * This function starts a flash cycle and waits for its completion
   14079  *
   14080  * sc - The pointer to the hw structure
   14081  ****************************************************************************/
   14082 static int32_t
   14083 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14084 {
   14085 	uint16_t hsflctl;
   14086 	uint16_t hsfsts;
   14087 	int32_t error = 1;
   14088 	uint32_t i = 0;
   14089 
   14090 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14091 	if (sc->sc_type >= WM_T_PCH_SPT)
   14092 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14093 	else
   14094 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14095 	hsflctl |= HSFCTL_GO;
   14096 	if (sc->sc_type >= WM_T_PCH_SPT)
   14097 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14098 		    (uint32_t)hsflctl << 16);
   14099 	else
   14100 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14101 
   14102 	/* Wait till FDONE bit is set to 1 */
   14103 	do {
   14104 		if (sc->sc_type >= WM_T_PCH_SPT)
   14105 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14106 			    & 0xffffUL;
   14107 		else
   14108 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14109 		if (hsfsts & HSFSTS_DONE)
   14110 			break;
   14111 		delay(1);
   14112 		i++;
   14113 	} while (i < timeout);
    14114 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14115 		error = 0;
   14116 
   14117 	return error;
   14118 }
   14119 
   14120 /******************************************************************************
   14121  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14122  *
   14123  * sc - The pointer to the hw structure
   14124  * index - The index of the byte or word to read.
    14125  * size - Size of data to read: 1=byte, 2=word, 4=dword
   14126  * data - Pointer to the word to store the value read.
   14127  *****************************************************************************/
   14128 static int32_t
   14129 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14130     uint32_t size, uint32_t *data)
   14131 {
   14132 	uint16_t hsfsts;
   14133 	uint16_t hsflctl;
   14134 	uint32_t flash_linear_address;
   14135 	uint32_t flash_data = 0;
   14136 	int32_t error = 1;
   14137 	int32_t count = 0;
   14138 
    14139 	if (size < 1 || size > 4 || data == NULL ||
   14140 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14141 		return error;
   14142 
   14143 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14144 	    sc->sc_ich8_flash_base;
   14145 
   14146 	do {
   14147 		delay(1);
   14148 		/* Steps */
   14149 		error = wm_ich8_cycle_init(sc);
   14150 		if (error)
   14151 			break;
   14152 
   14153 		if (sc->sc_type >= WM_T_PCH_SPT)
   14154 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14155 			    >> 16;
   14156 		else
   14157 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14158 		/* BCOUNT is size - 1: 0/1/3 = 1-, 2- and 4-byte reads. */
    14159 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
    14160 		    & HSFCTL_BCOUNT_MASK;
   14161 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14162 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14163 			/*
   14164 			 * In SPT, This register is in Lan memory space, not
   14165 			 * flash. Therefore, only 32 bit access is supported.
   14166 			 */
   14167 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14168 			    (uint32_t)hsflctl << 16);
   14169 		} else
   14170 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14171 
   14172 		/*
   14173 		 * Write the last 24 bits of index into Flash Linear address
   14174 		 * field in Flash Address
   14175 		 */
   14176 		/* TODO: TBD maybe check the index against the size of flash */
   14177 
   14178 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14179 
   14180 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14181 
   14182 		/*
    14183 		 * If FCERR is set, clear it and retry the whole sequence a
    14184 		 * few more times; otherwise read in the result from Flash
    14185 		 * Data0.  The data is returned least significant byte
    14186 		 * first.
   14187 		 */
   14188 		if (error == 0) {
   14189 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14190 			if (size == 1)
   14191 				*data = (uint8_t)(flash_data & 0x000000FF);
   14192 			else if (size == 2)
   14193 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14194 			else if (size == 4)
   14195 				*data = (uint32_t)flash_data;
   14196 			break;
   14197 		} else {
    14198 			/*
    14199 			 * If we got here, things are probably completely
    14200 			 * hosed, but if the error condition is detected, it
    14201 			 * won't hurt to retry up to
    14202 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
    14203 			 */
   14204 			if (sc->sc_type >= WM_T_PCH_SPT)
   14205 				hsfsts = ICH8_FLASH_READ32(sc,
   14206 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14207 			else
   14208 				hsfsts = ICH8_FLASH_READ16(sc,
   14209 				    ICH_FLASH_HSFSTS);
   14210 
   14211 			if (hsfsts & HSFSTS_ERR) {
   14212 				/* Repeat for some time before giving up. */
   14213 				continue;
   14214 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14215 				break;
   14216 		}
   14217 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14218 
   14219 	return error;
   14220 }
   14221 
   14222 /******************************************************************************
   14223  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14224  *
    14225  * sc - pointer to the wm_softc structure
   14226  * index - The index of the byte to read.
   14227  * data - Pointer to a byte to store the value read.
   14228  *****************************************************************************/
   14229 static int32_t
   14230 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14231 {
   14232 	int32_t status;
   14233 	uint32_t word = 0;
   14234 
   14235 	status = wm_read_ich8_data(sc, index, 1, &word);
   14236 	if (status == 0)
   14237 		*data = (uint8_t)word;
   14238 	else
   14239 		*data = 0;
   14240 
   14241 	return status;
   14242 }
   14243 
   14244 /******************************************************************************
   14245  * Reads a word from the NVM using the ICH8 flash access registers.
   14246  *
    14247  * sc - pointer to the wm_softc structure
   14248  * index - The starting byte index of the word to read.
   14249  * data - Pointer to a word to store the value read.
   14250  *****************************************************************************/
   14251 static int32_t
   14252 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14253 {
   14254 	int32_t status;
   14255 	uint32_t word = 0;
   14256 
   14257 	status = wm_read_ich8_data(sc, index, 2, &word);
   14258 	if (status == 0)
   14259 		*data = (uint16_t)word;
   14260 	else
   14261 		*data = 0;
   14262 
   14263 	return status;
   14264 }
   14265 
   14266 /******************************************************************************
   14267  * Reads a dword from the NVM using the ICH8 flash access registers.
   14268  *
    14269  * sc - pointer to the wm_softc structure
    14270  * index - The starting byte index of the dword to read.
    14271  * data - Pointer to a dword to store the value read.
   14272  *****************************************************************************/
   14273 static int32_t
   14274 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14275 {
   14276 	int32_t status;
   14277 
   14278 	status = wm_read_ich8_data(sc, index, 4, data);
   14279 	return status;
   14280 }
   14281 
   14282 /******************************************************************************
   14283  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14284  * register.
   14285  *
   14286  * sc - Struct containing variables accessed by shared code
   14287  * offset - offset of word in the EEPROM to read
   14288  * data - word read from the EEPROM
   14289  * words - number of words to read
   14290  *****************************************************************************/
   14291 static int
   14292 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14293 {
   14294 	int rv;
   14295 	uint32_t flash_bank = 0;
   14296 	uint32_t act_offset = 0;
   14297 	uint32_t bank_offset = 0;
   14298 	uint16_t word = 0;
   14299 	uint16_t i = 0;
   14300 
   14301 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14302 		device_xname(sc->sc_dev), __func__));
   14303 
   14304 	rv = sc->nvm.acquire(sc);
   14305 	if (rv != 0)
   14306 		return rv;
   14307 
   14308 	/*
   14309 	 * We need to know which is the valid flash bank.  In the event
   14310 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14311 	 * managing flash_bank. So it cannot be trusted and needs
   14312 	 * to be updated with each read.
   14313 	 */
   14314 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14315 	if (rv) {
   14316 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14317 			device_xname(sc->sc_dev)));
   14318 		flash_bank = 0;
   14319 	}
   14320 
   14321 	/*
   14322 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14323 	 * size
   14324 	 */
   14325 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
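          	/*
          	 * Worked example (with a hypothetical bank size): if
          	 * flash_bank is 1 and sc_ich8_flash_bank_size is 0x800 words,
          	 * bank_offset is 0x1000 bytes, so NVM word 0x10 is read from
          	 * byte offset 0x1020.
          	 */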
   14326 
   14327 	for (i = 0; i < words; i++) {
   14328 		/* The NVM part needs a byte offset, hence * 2 */
   14329 		act_offset = bank_offset + ((offset + i) * 2);
   14330 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14331 		if (rv) {
   14332 			aprint_error_dev(sc->sc_dev,
   14333 			    "%s: failed to read NVM\n", __func__);
   14334 			break;
   14335 		}
   14336 		data[i] = word;
   14337 	}
   14338 
   14339 	sc->nvm.release(sc);
   14340 	return rv;
   14341 }
   14342 
   14343 /******************************************************************************
   14344  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14345  * register.
   14346  *
   14347  * sc - Struct containing variables accessed by shared code
   14348  * offset - offset of word in the EEPROM to read
   14349  * data - word read from the EEPROM
   14350  * words - number of words to read
   14351  *****************************************************************************/
   14352 static int
   14353 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14354 {
   14355 	int	 rv;
   14356 	uint32_t flash_bank = 0;
   14357 	uint32_t act_offset = 0;
   14358 	uint32_t bank_offset = 0;
   14359 	uint32_t dword = 0;
   14360 	uint16_t i = 0;
   14361 
   14362 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14363 		device_xname(sc->sc_dev), __func__));
   14364 
   14365 	rv = sc->nvm.acquire(sc);
   14366 	if (rv != 0)
   14367 		return rv;
   14368 
   14369 	/*
   14370 	 * We need to know which is the valid flash bank.  In the event
   14371 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14372 	 * managing flash_bank. So it cannot be trusted and needs
   14373 	 * to be updated with each read.
   14374 	 */
   14375 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14376 	if (rv) {
   14377 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14378 			device_xname(sc->sc_dev)));
   14379 		flash_bank = 0;
   14380 	}
   14381 
   14382 	/*
   14383 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14384 	 * size
   14385 	 */
   14386 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14387 
   14388 	for (i = 0; i < words; i++) {
   14389 		/* The NVM part needs a byte offset, hence * 2 */
   14390 		act_offset = bank_offset + ((offset + i) * 2);
   14391 		/* but we must read dword aligned, so mask ... */
   14392 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14393 		if (rv) {
   14394 			aprint_error_dev(sc->sc_dev,
   14395 			    "%s: failed to read NVM\n", __func__);
   14396 			break;
   14397 		}
   14398 		/* ... and pick out low or high word */
   14399 		if ((act_offset & 0x2) == 0)
   14400 			data[i] = (uint16_t)(dword & 0xFFFF);
   14401 		else
   14402 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
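          		/*
          		 * Worked example: act_offset 0x36 reads the dword at
          		 * 0x34 and returns its high 16 bits; act_offset 0x34
          		 * would return the low 16 bits of the same dword.
          		 */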
   14403 	}
   14404 
   14405 	sc->nvm.release(sc);
   14406 	return rv;
   14407 }
   14408 
   14409 /* iNVM */
   14410 
   14411 static int
   14412 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14413 {
    14414 	int32_t	 rv = -1;	/* "not found" until a matching record is seen */
   14415 	uint32_t invm_dword;
   14416 	uint16_t i;
   14417 	uint8_t record_type, word_address;
   14418 
   14419 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14420 		device_xname(sc->sc_dev), __func__));
   14421 
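          	/*
          	 * Scan the iNVM dwords.  Each dword encodes a record type in
          	 * its low bits; CSR-autoload and RSA-key records are skipped
          	 * by advancing i, and a word-autoload record carries a word
          	 * address plus 16 bits of data (see the INVM_DWORD_TO_*
          	 * macros).
          	 */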
   14422 	for (i = 0; i < INVM_SIZE; i++) {
   14423 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14424 		/* Get record type */
   14425 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14426 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14427 			break;
   14428 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14429 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14430 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14431 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14432 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14433 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14434 			if (word_address == address) {
   14435 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14436 				rv = 0;
   14437 				break;
   14438 			}
   14439 		}
   14440 	}
   14441 
   14442 	return rv;
   14443 }
   14444 
   14445 static int
   14446 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14447 {
   14448 	int i, rv;
   14449 
   14450 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14451 		device_xname(sc->sc_dev), __func__));
   14452 
   14453 	rv = sc->nvm.acquire(sc);
   14454 	if (rv != 0)
   14455 		return rv;
   14456 
   14457 	for (i = 0; i < words; i++) {
   14458 		switch (offset + i) {
   14459 		case NVM_OFF_MACADDR:
   14460 		case NVM_OFF_MACADDR1:
   14461 		case NVM_OFF_MACADDR2:
   14462 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14463 			if (rv != 0) {
   14464 				data[i] = 0xffff;
   14465 				rv = -1;
   14466 			}
   14467 			break;
   14468 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14469 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14470 			if (rv != 0) {
   14471 				*data = INVM_DEFAULT_AL;
   14472 				rv = 0;
   14473 			}
   14474 			break;
   14475 		case NVM_OFF_CFG2:
   14476 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14477 			if (rv != 0) {
   14478 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14479 				rv = 0;
   14480 			}
   14481 			break;
   14482 		case NVM_OFF_CFG4:
   14483 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14484 			if (rv != 0) {
   14485 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14486 				rv = 0;
   14487 			}
   14488 			break;
   14489 		case NVM_OFF_LED_1_CFG:
   14490 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14491 			if (rv != 0) {
   14492 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14493 				rv = 0;
   14494 			}
   14495 			break;
   14496 		case NVM_OFF_LED_0_2_CFG:
   14497 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14498 			if (rv != 0) {
   14499 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14500 				rv = 0;
   14501 			}
   14502 			break;
   14503 		case NVM_OFF_ID_LED_SETTINGS:
   14504 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14505 			if (rv != 0) {
   14506 				*data = ID_LED_RESERVED_FFFF;
   14507 				rv = 0;
   14508 			}
   14509 			break;
   14510 		default:
   14511 			DPRINTF(sc, WM_DEBUG_NVM,
   14512 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14513 			*data = NVM_RESERVED_WORD;
   14514 			break;
   14515 		}
   14516 	}
   14517 
   14518 	sc->nvm.release(sc);
   14519 	return rv;
   14520 }
   14521 
   14522 /* Lock, detecting NVM type, validate checksum, version and read */
   14523 
   14524 static int
   14525 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14526 {
   14527 	uint32_t eecd = 0;
   14528 
   14529 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14530 	    || sc->sc_type == WM_T_82583) {
   14531 		eecd = CSR_READ(sc, WMREG_EECD);
   14532 
   14533 		/* Isolate bits 15 & 16 */
   14534 		eecd = ((eecd >> 15) & 0x03);
   14535 
   14536 		/* If both bits are set, device is Flash type */
   14537 		if (eecd == 0x03)
   14538 			return 0;
   14539 	}
   14540 	return 1;
   14541 }
   14542 
   14543 static int
   14544 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14545 {
   14546 	uint32_t eec;
   14547 
   14548 	eec = CSR_READ(sc, WMREG_EEC);
   14549 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14550 		return 1;
   14551 
   14552 	return 0;
   14553 }
   14554 
   14555 /*
   14556  * wm_nvm_validate_checksum
   14557  *
    14558  * The checksum is defined as the sum (mod 2^16) of the first 64
           * 16-bit words; a valid image sums to NVM_CHECKSUM.
   14559  */
   14560 static int
   14561 wm_nvm_validate_checksum(struct wm_softc *sc)
   14562 {
   14563 	uint16_t checksum;
   14564 	uint16_t eeprom_data;
   14565 #ifdef WM_DEBUG
   14566 	uint16_t csum_wordaddr, valid_checksum;
   14567 #endif
   14568 	int i;
   14569 
   14570 	checksum = 0;
   14571 
   14572 	/* Don't check for I211 */
   14573 	if (sc->sc_type == WM_T_I211)
   14574 		return 0;
   14575 
   14576 #ifdef WM_DEBUG
   14577 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14578 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14579 		csum_wordaddr = NVM_OFF_COMPAT;
   14580 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14581 	} else {
   14582 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14583 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14584 	}
   14585 
   14586 	/* Dump EEPROM image for debug */
   14587 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14588 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14589 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14590 		/* XXX PCH_SPT? */
   14591 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14592 		if ((eeprom_data & valid_checksum) == 0)
   14593 			DPRINTF(sc, WM_DEBUG_NVM,
   14594 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14595 				device_xname(sc->sc_dev), eeprom_data,
   14596 				valid_checksum));
   14597 	}
   14598 
   14599 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14600 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14601 		for (i = 0; i < NVM_SIZE; i++) {
   14602 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14603 				printf("XXXX ");
   14604 			else
   14605 				printf("%04hx ", eeprom_data);
   14606 			if (i % 8 == 7)
   14607 				printf("\n");
   14608 		}
   14609 	}
   14610 
   14611 #endif /* WM_DEBUG */
   14612 
   14613 	for (i = 0; i < NVM_SIZE; i++) {
   14614 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14615 			return -1;
   14616 		checksum += eeprom_data;
   14617 	}
   14618 
   14619 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14620 #ifdef WM_DEBUG
   14621 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14622 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14623 #endif
   14624 	}
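          	/*
          	 * Note: a mismatch is only logged (and only under WM_DEBUG);
          	 * we still return 0 so that adapters with a stale checksum
          	 * keep working.
          	 */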
   14625 
   14626 	return 0;
   14627 }
   14628 
   14629 static void
   14630 wm_nvm_version_invm(struct wm_softc *sc)
   14631 {
   14632 	uint32_t dword;
   14633 
   14634 	/*
    14635 	 * Linux's code to decode the version is very strange, so we don't
    14636 	 * follow that algorithm and just use word 61 as the document
    14637 	 * describes.  Perhaps it's not perfect, though...
   14638 	 *
   14639 	 * Example:
   14640 	 *
   14641 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14642 	 */
   14643 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14644 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14645 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14646 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14647 }
   14648 
   14649 static void
   14650 wm_nvm_version(struct wm_softc *sc)
   14651 {
   14652 	uint16_t major, minor, build, patch;
   14653 	uint16_t uid0, uid1;
   14654 	uint16_t nvm_data;
   14655 	uint16_t off;
   14656 	bool check_version = false;
   14657 	bool check_optionrom = false;
   14658 	bool have_build = false;
   14659 	bool have_uid = true;
   14660 
   14661 	/*
   14662 	 * Version format:
   14663 	 *
   14664 	 * XYYZ
   14665 	 * X0YZ
   14666 	 * X0YY
   14667 	 *
   14668 	 * Example:
   14669 	 *
   14670 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14671 	 *	82571	0x50a6	5.10.6?
   14672 	 *	82572	0x506a	5.6.10?
   14673 	 *	82572EI	0x5069	5.6.9?
   14674 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14675 	 *		0x2013	2.1.3?
   14676 	 *	82583	0x10a0	1.10.0? (document says it's default value)
    14677 	 *  ICH8+82567	0x0040	0.4.0?
    14678 	 *  ICH9+82566	0x1040	1.4.0?
    14679 	 * ICH10+82567	0x0043	0.4.3?
    14680 	 *   PCH+82577	0x00c1	0.12.1?
    14681 	 *  PCH2+82579	0x00d3	0.13.3?
    14682 	 *		0x00d4	0.13.4?
    14683 	 *    LPT+I218	0x0023	0.2.3?
    14684 	 *    SPT+I219	0x0084	0.8.4?
    14685 	 *    CNP+I219	0x0054	0.5.4?
   14686 	 */
   14687 
   14688 	/*
   14689 	 * XXX
    14690 	 * QEMU's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    14691 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14692 	 */
   14693 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14694 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14695 		have_uid = false;
   14696 
   14697 	switch (sc->sc_type) {
   14698 	case WM_T_82571:
   14699 	case WM_T_82572:
   14700 	case WM_T_82574:
   14701 	case WM_T_82583:
   14702 		check_version = true;
   14703 		check_optionrom = true;
   14704 		have_build = true;
   14705 		break;
   14706 	case WM_T_ICH8:
   14707 	case WM_T_ICH9:
   14708 	case WM_T_ICH10:
   14709 	case WM_T_PCH:
   14710 	case WM_T_PCH2:
   14711 	case WM_T_PCH_LPT:
   14712 	case WM_T_PCH_SPT:
   14713 	case WM_T_PCH_CNP:
   14714 		check_version = true;
   14715 		have_build = true;
   14716 		have_uid = false;
   14717 		break;
   14718 	case WM_T_82575:
   14719 	case WM_T_82576:
   14720 	case WM_T_82580:
   14721 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14722 			check_version = true;
   14723 		break;
   14724 	case WM_T_I211:
   14725 		wm_nvm_version_invm(sc);
   14726 		have_uid = false;
   14727 		goto printver;
   14728 	case WM_T_I210:
   14729 		if (!wm_nvm_flash_presence_i210(sc)) {
   14730 			wm_nvm_version_invm(sc);
   14731 			have_uid = false;
   14732 			goto printver;
   14733 		}
   14734 		/* FALLTHROUGH */
   14735 	case WM_T_I350:
   14736 	case WM_T_I354:
   14737 		check_version = true;
   14738 		check_optionrom = true;
   14739 		break;
   14740 	default:
   14741 		return;
   14742 	}
   14743 	if (check_version
   14744 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14745 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14746 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14747 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14748 			build = nvm_data & NVM_BUILD_MASK;
   14749 			have_build = true;
   14750 		} else
   14751 			minor = nvm_data & 0x00ff;
   14752 
    14753 		/* Convert the BCD-style minor value to decimal (e.g. 0x12 -> 12) */
   14754 		minor = (minor / 16) * 10 + (minor % 16);
   14755 		sc->sc_nvm_ver_major = major;
   14756 		sc->sc_nvm_ver_minor = minor;
   14757 
   14758 printver:
   14759 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14760 		    sc->sc_nvm_ver_minor);
   14761 		if (have_build) {
   14762 			sc->sc_nvm_ver_build = build;
   14763 			aprint_verbose(".%d", build);
   14764 		}
   14765 	}
   14766 
    14767 	/* Assume the Option ROM area is above NVM_SIZE */
   14768 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14769 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14770 		/* Option ROM Version */
   14771 		if ((off != 0x0000) && (off != 0xffff)) {
   14772 			int rv;
   14773 
   14774 			off += NVM_COMBO_VER_OFF;
   14775 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14776 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14777 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14778 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14779 				/* 16bits */
   14780 				major = uid0 >> 8;
   14781 				build = (uid0 << 8) | (uid1 >> 8);
   14782 				patch = uid1 & 0x00ff;
   14783 				aprint_verbose(", option ROM Version %d.%d.%d",
   14784 				    major, build, patch);
   14785 			}
   14786 		}
   14787 	}
   14788 
   14789 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14790 		aprint_verbose(", Image Unique ID %08x",
   14791 		    ((uint32_t)uid1 << 16) | uid0);
   14792 }
   14793 
   14794 /*
   14795  * wm_nvm_read:
   14796  *
   14797  *	Read data from the serial EEPROM.
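           *
           *	A typical call (sketch) reads two words starting at NVM
           *	word 0x30:
           *
           *		uint16_t buf[2];
           *		if (wm_nvm_read(sc, 0x30, 2, buf) != 0)
           *			... handle error ...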
   14798  */
   14799 static int
   14800 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14801 {
   14802 	int rv;
   14803 
   14804 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14805 		device_xname(sc->sc_dev), __func__));
   14806 
   14807 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14808 		return -1;
   14809 
   14810 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14811 
   14812 	return rv;
   14813 }
   14814 
   14815 /*
   14816  * Hardware semaphores.
    14817  * Very complex...
   14818  */
   14819 
   14820 static int
   14821 wm_get_null(struct wm_softc *sc)
   14822 {
   14823 
   14824 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14825 		device_xname(sc->sc_dev), __func__));
   14826 	return 0;
   14827 }
   14828 
   14829 static void
   14830 wm_put_null(struct wm_softc *sc)
   14831 {
   14832 
   14833 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14834 		device_xname(sc->sc_dev), __func__));
   14835 	return;
   14836 }
   14837 
   14838 static int
   14839 wm_get_eecd(struct wm_softc *sc)
   14840 {
   14841 	uint32_t reg;
   14842 	int x;
   14843 
   14844 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14845 		device_xname(sc->sc_dev), __func__));
   14846 
   14847 	reg = CSR_READ(sc, WMREG_EECD);
   14848 
   14849 	/* Request EEPROM access. */
   14850 	reg |= EECD_EE_REQ;
   14851 	CSR_WRITE(sc, WMREG_EECD, reg);
   14852 
    14853 	/* ... and wait for it to be granted. */
   14854 	for (x = 0; x < 1000; x++) {
   14855 		reg = CSR_READ(sc, WMREG_EECD);
   14856 		if (reg & EECD_EE_GNT)
   14857 			break;
   14858 		delay(5);
   14859 	}
   14860 	if ((reg & EECD_EE_GNT) == 0) {
   14861 		aprint_error_dev(sc->sc_dev,
   14862 		    "could not acquire EEPROM GNT\n");
   14863 		reg &= ~EECD_EE_REQ;
   14864 		CSR_WRITE(sc, WMREG_EECD, reg);
   14865 		return -1;
   14866 	}
   14867 
   14868 	return 0;
   14869 }
   14870 
   14871 static void
   14872 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14873 {
   14874 
   14875 	*eecd |= EECD_SK;
   14876 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14877 	CSR_WRITE_FLUSH(sc);
   14878 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14879 		delay(1);
   14880 	else
   14881 		delay(50);
   14882 }
   14883 
   14884 static void
   14885 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14886 {
   14887 
   14888 	*eecd &= ~EECD_SK;
   14889 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14890 	CSR_WRITE_FLUSH(sc);
   14891 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14892 		delay(1);
   14893 	else
   14894 		delay(50);
   14895 }
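          
          /*
           * A single data bit is typically shifted by setting or clearing
           * EECD_DI and then calling wm_nvm_eec_clock_raise() followed by
           * wm_nvm_eec_clock_lower(); the SPI/Microwire read routines
           * elsewhere in this file build their bit-banged transfers out of
           * these helpers.
           */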
   14896 
   14897 static void
   14898 wm_put_eecd(struct wm_softc *sc)
   14899 {
   14900 	uint32_t reg;
   14901 
   14902 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14903 		device_xname(sc->sc_dev), __func__));
   14904 
   14905 	/* Stop nvm */
   14906 	reg = CSR_READ(sc, WMREG_EECD);
   14907 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14908 		/* Pull CS high */
   14909 		reg |= EECD_CS;
   14910 		wm_nvm_eec_clock_lower(sc, &reg);
   14911 	} else {
   14912 		/* CS on Microwire is active-high */
   14913 		reg &= ~(EECD_CS | EECD_DI);
   14914 		CSR_WRITE(sc, WMREG_EECD, reg);
   14915 		wm_nvm_eec_clock_raise(sc, &reg);
   14916 		wm_nvm_eec_clock_lower(sc, &reg);
   14917 	}
   14918 
   14919 	reg = CSR_READ(sc, WMREG_EECD);
   14920 	reg &= ~EECD_EE_REQ;
   14921 	CSR_WRITE(sc, WMREG_EECD, reg);
   14922 
   14923 	return;
   14924 }
   14925 
   14926 /*
   14927  * Get hardware semaphore.
   14928  * Same as e1000_get_hw_semaphore_generic()
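           *
           * Acquisition is two-staged: wait for the hardware to clear SMBI,
           * then set SWESMBI and read it back to confirm ownership.  A
           * typical caller does (sketch):
           *
           *	if (wm_get_swsm_semaphore(sc) != 0)
           *		return -1;
           *	... access the shared NVM/PHY resource ...
           *	wm_put_swsm_semaphore(sc);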
   14929  */
   14930 static int
   14931 wm_get_swsm_semaphore(struct wm_softc *sc)
   14932 {
   14933 	int32_t timeout;
   14934 	uint32_t swsm;
   14935 
   14936 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14937 		device_xname(sc->sc_dev), __func__));
   14938 	KASSERT(sc->sc_nvm_wordsize > 0);
   14939 
   14940 retry:
   14941 	/* Get the SW semaphore. */
   14942 	timeout = sc->sc_nvm_wordsize + 1;
   14943 	while (timeout) {
   14944 		swsm = CSR_READ(sc, WMREG_SWSM);
   14945 
   14946 		if ((swsm & SWSM_SMBI) == 0)
   14947 			break;
   14948 
   14949 		delay(50);
   14950 		timeout--;
   14951 	}
   14952 
   14953 	if (timeout == 0) {
   14954 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14955 			/*
   14956 			 * In rare circumstances, the SW semaphore may already
   14957 			 * be held unintentionally. Clear the semaphore once
   14958 			 * before giving up.
   14959 			 */
   14960 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14961 			wm_put_swsm_semaphore(sc);
   14962 			goto retry;
   14963 		}
   14964 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14965 		return -1;
   14966 	}
   14967 
   14968 	/* Get the FW semaphore. */
   14969 	timeout = sc->sc_nvm_wordsize + 1;
   14970 	while (timeout) {
   14971 		swsm = CSR_READ(sc, WMREG_SWSM);
   14972 		swsm |= SWSM_SWESMBI;
   14973 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14974 		/* If we managed to set the bit we got the semaphore. */
   14975 		swsm = CSR_READ(sc, WMREG_SWSM);
   14976 		if (swsm & SWSM_SWESMBI)
   14977 			break;
   14978 
   14979 		delay(50);
   14980 		timeout--;
   14981 	}
   14982 
   14983 	if (timeout == 0) {
   14984 		aprint_error_dev(sc->sc_dev,
   14985 		    "could not acquire SWSM SWESMBI\n");
   14986 		/* Release semaphores */
   14987 		wm_put_swsm_semaphore(sc);
   14988 		return -1;
   14989 	}
   14990 	return 0;
   14991 }
   14992 
   14993 /*
   14994  * Put hardware semaphore.
   14995  * Same as e1000_put_hw_semaphore_generic()
   14996  */
   14997 static void
   14998 wm_put_swsm_semaphore(struct wm_softc *sc)
   14999 {
   15000 	uint32_t swsm;
   15001 
   15002 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15003 		device_xname(sc->sc_dev), __func__));
   15004 
   15005 	swsm = CSR_READ(sc, WMREG_SWSM);
   15006 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15007 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15008 }
   15009 
   15010 /*
   15011  * Get SW/FW semaphore.
   15012  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15013  */
   15014 static int
   15015 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15016 {
   15017 	uint32_t swfw_sync;
   15018 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15019 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15020 	int timeout;
   15021 
   15022 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15023 		device_xname(sc->sc_dev), __func__));
   15024 
   15025 	if (sc->sc_type == WM_T_80003)
   15026 		timeout = 50;
   15027 	else
   15028 		timeout = 200;
   15029 
   15030 	while (timeout) {
   15031 		if (wm_get_swsm_semaphore(sc)) {
   15032 			aprint_error_dev(sc->sc_dev,
   15033 			    "%s: failed to get semaphore\n",
   15034 			    __func__);
   15035 			return -1;
   15036 		}
   15037 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15038 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15039 			swfw_sync |= swmask;
   15040 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15041 			wm_put_swsm_semaphore(sc);
   15042 			return 0;
   15043 		}
   15044 		wm_put_swsm_semaphore(sc);
   15045 		delay(5000);
   15046 		timeout--;
   15047 	}
   15048 	device_printf(sc->sc_dev,
   15049 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15050 	    mask, swfw_sync);
   15051 	return -1;
   15052 }
   15053 
   15054 static void
   15055 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15056 {
   15057 	uint32_t swfw_sync;
   15058 
   15059 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15060 		device_xname(sc->sc_dev), __func__));
   15061 
   15062 	while (wm_get_swsm_semaphore(sc) != 0)
   15063 		continue;
   15064 
   15065 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15066 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15067 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15068 
   15069 	wm_put_swsm_semaphore(sc);
   15070 }
   15071 
   15072 static int
   15073 wm_get_nvm_80003(struct wm_softc *sc)
   15074 {
   15075 	int rv;
   15076 
   15077 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15078 		device_xname(sc->sc_dev), __func__));
   15079 
   15080 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15081 		aprint_error_dev(sc->sc_dev,
   15082 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15083 		return rv;
   15084 	}
   15085 
   15086 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15087 	    && (rv = wm_get_eecd(sc)) != 0) {
   15088 		aprint_error_dev(sc->sc_dev,
   15089 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15090 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15091 		return rv;
   15092 	}
   15093 
   15094 	return 0;
   15095 }
   15096 
   15097 static void
   15098 wm_put_nvm_80003(struct wm_softc *sc)
   15099 {
   15100 
   15101 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15102 		device_xname(sc->sc_dev), __func__));
   15103 
   15104 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15105 		wm_put_eecd(sc);
   15106 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15107 }
   15108 
   15109 static int
   15110 wm_get_nvm_82571(struct wm_softc *sc)
   15111 {
   15112 	int rv;
   15113 
   15114 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15115 		device_xname(sc->sc_dev), __func__));
   15116 
   15117 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15118 		return rv;
   15119 
   15120 	switch (sc->sc_type) {
   15121 	case WM_T_82573:
   15122 		break;
   15123 	default:
   15124 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15125 			rv = wm_get_eecd(sc);
   15126 		break;
   15127 	}
   15128 
   15129 	if (rv != 0) {
   15130 		aprint_error_dev(sc->sc_dev,
   15131 		    "%s: failed to get semaphore\n",
   15132 		    __func__);
   15133 		wm_put_swsm_semaphore(sc);
   15134 	}
   15135 
   15136 	return rv;
   15137 }
   15138 
   15139 static void
   15140 wm_put_nvm_82571(struct wm_softc *sc)
   15141 {
   15142 
   15143 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15144 		device_xname(sc->sc_dev), __func__));
   15145 
   15146 	switch (sc->sc_type) {
   15147 	case WM_T_82573:
   15148 		break;
   15149 	default:
   15150 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15151 			wm_put_eecd(sc);
   15152 		break;
   15153 	}
   15154 
   15155 	wm_put_swsm_semaphore(sc);
   15156 }
   15157 
   15158 static int
   15159 wm_get_phy_82575(struct wm_softc *sc)
   15160 {
   15161 
   15162 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15163 		device_xname(sc->sc_dev), __func__));
   15164 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15165 }
   15166 
   15167 static void
   15168 wm_put_phy_82575(struct wm_softc *sc)
   15169 {
   15170 
   15171 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15172 		device_xname(sc->sc_dev), __func__));
   15173 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15174 }
   15175 
   15176 static int
   15177 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15178 {
   15179 	uint32_t ext_ctrl;
    15180 	int timeout;
   15181 
   15182 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15183 		device_xname(sc->sc_dev), __func__));
   15184 
   15185 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15186 	for (timeout = 0; timeout < 200; timeout++) {
   15187 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15188 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15189 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15190 
   15191 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15192 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15193 			return 0;
   15194 		delay(5000);
   15195 	}
   15196 	device_printf(sc->sc_dev,
   15197 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15198 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15199 	return -1;
   15200 }
   15201 
   15202 static void
   15203 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15204 {
   15205 	uint32_t ext_ctrl;
   15206 
   15207 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15208 		device_xname(sc->sc_dev), __func__));
   15209 
   15210 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15211 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15212 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15213 
   15214 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15215 }
   15216 
   15217 static int
   15218 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15219 {
   15220 	uint32_t ext_ctrl;
   15221 	int timeout;
   15222 
   15223 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15224 		device_xname(sc->sc_dev), __func__));
   15225 	mutex_enter(sc->sc_ich_phymtx);
   15226 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15227 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15228 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15229 			break;
   15230 		delay(1000);
   15231 	}
   15232 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15233 		device_printf(sc->sc_dev,
   15234 		    "SW has already locked the resource\n");
   15235 		goto out;
   15236 	}
   15237 
   15238 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15239 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15240 	for (timeout = 0; timeout < 1000; timeout++) {
   15241 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15242 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15243 			break;
   15244 		delay(1000);
   15245 	}
   15246 	if (timeout >= 1000) {
   15247 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15248 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15249 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15250 		goto out;
   15251 	}
   15252 	return 0;
   15253 
   15254 out:
   15255 	mutex_exit(sc->sc_ich_phymtx);
   15256 	return -1;
   15257 }
   15258 
   15259 static void
   15260 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15261 {
   15262 	uint32_t ext_ctrl;
   15263 
   15264 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15265 		device_xname(sc->sc_dev), __func__));
   15266 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15267 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15268 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15269 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15270 	} else
   15271 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15272 
   15273 	mutex_exit(sc->sc_ich_phymtx);
   15274 }
   15275 
   15276 static int
   15277 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15278 {
   15279 
   15280 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15281 		device_xname(sc->sc_dev), __func__));
   15282 	mutex_enter(sc->sc_ich_nvmmtx);
   15283 
   15284 	return 0;
   15285 }
   15286 
   15287 static void
   15288 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15289 {
   15290 
   15291 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15292 		device_xname(sc->sc_dev), __func__));
   15293 	mutex_exit(sc->sc_ich_nvmmtx);
   15294 }
   15295 
   15296 static int
   15297 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15298 {
   15299 	int i = 0;
   15300 	uint32_t reg;
   15301 
   15302 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15303 		device_xname(sc->sc_dev), __func__));
   15304 
   15305 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15306 	do {
   15307 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15308 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15309 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15310 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15311 			break;
   15312 		delay(2*1000);
   15313 		i++;
   15314 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15315 
   15316 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15317 		wm_put_hw_semaphore_82573(sc);
   15318 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15319 		    device_xname(sc->sc_dev));
   15320 		return -1;
   15321 	}
   15322 
   15323 	return 0;
   15324 }
   15325 
   15326 static void
   15327 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15328 {
   15329 	uint32_t reg;
   15330 
   15331 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15332 		device_xname(sc->sc_dev), __func__));
   15333 
   15334 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15335 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15336 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15337 }
   15338 
   15339 /*
   15340  * Management mode and power management related subroutines.
   15341  * BMC, AMT, suspend/resume and EEE.
   15342  */
   15343 
   15344 #ifdef WM_WOL
   15345 static int
   15346 wm_check_mng_mode(struct wm_softc *sc)
   15347 {
   15348 	int rv;
   15349 
   15350 	switch (sc->sc_type) {
   15351 	case WM_T_ICH8:
   15352 	case WM_T_ICH9:
   15353 	case WM_T_ICH10:
   15354 	case WM_T_PCH:
   15355 	case WM_T_PCH2:
   15356 	case WM_T_PCH_LPT:
   15357 	case WM_T_PCH_SPT:
   15358 	case WM_T_PCH_CNP:
   15359 		rv = wm_check_mng_mode_ich8lan(sc);
   15360 		break;
   15361 	case WM_T_82574:
   15362 	case WM_T_82583:
   15363 		rv = wm_check_mng_mode_82574(sc);
   15364 		break;
   15365 	case WM_T_82571:
   15366 	case WM_T_82572:
   15367 	case WM_T_82573:
   15368 	case WM_T_80003:
   15369 		rv = wm_check_mng_mode_generic(sc);
   15370 		break;
   15371 	default:
    15372 		/* Nothing to do */
   15373 		rv = 0;
   15374 		break;
   15375 	}
   15376 
   15377 	return rv;
   15378 }
   15379 
   15380 static int
   15381 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15382 {
   15383 	uint32_t fwsm;
   15384 
   15385 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15386 
   15387 	if (((fwsm & FWSM_FW_VALID) != 0)
   15388 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15389 		return 1;
   15390 
   15391 	return 0;
   15392 }
   15393 
   15394 static int
   15395 wm_check_mng_mode_82574(struct wm_softc *sc)
   15396 {
   15397 	uint16_t data;
   15398 
   15399 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15400 
   15401 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15402 		return 1;
   15403 
   15404 	return 0;
   15405 }
   15406 
   15407 static int
   15408 wm_check_mng_mode_generic(struct wm_softc *sc)
   15409 {
   15410 	uint32_t fwsm;
   15411 
   15412 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15413 
   15414 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15415 		return 1;
   15416 
   15417 	return 0;
   15418 }
   15419 #endif /* WM_WOL */
   15420 
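          /*
           * Report whether management pass-through should be enabled: TCO
           * receive must be on, and either the ARC subsystem reports iAMT
           * mode, the 82574/82583 NVM selects pass-through management, or
           * SMBus management is enabled without ASF.
           */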
   15421 static int
   15422 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15423 {
   15424 	uint32_t manc, fwsm, factps;
   15425 
   15426 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15427 		return 0;
   15428 
   15429 	manc = CSR_READ(sc, WMREG_MANC);
   15430 
   15431 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15432 		device_xname(sc->sc_dev), manc));
   15433 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15434 		return 0;
   15435 
   15436 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15437 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15438 		factps = CSR_READ(sc, WMREG_FACTPS);
   15439 		if (((factps & FACTPS_MNGCG) == 0)
   15440 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15441 			return 1;
    15442 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   15443 		uint16_t data;
   15444 
   15445 		factps = CSR_READ(sc, WMREG_FACTPS);
   15446 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15447 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15448 			device_xname(sc->sc_dev), factps, data));
   15449 		if (((factps & FACTPS_MNGCG) == 0)
   15450 		    && ((data & NVM_CFG2_MNGM_MASK)
   15451 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15452 			return 1;
   15453 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15454 	    && ((manc & MANC_ASF_EN) == 0))
   15455 		return 1;
   15456 
   15457 	return 0;
   15458 }
   15459 
   15460 static bool
   15461 wm_phy_resetisblocked(struct wm_softc *sc)
   15462 {
   15463 	bool blocked = false;
   15464 	uint32_t reg;
   15465 	int i = 0;
   15466 
   15467 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15468 		device_xname(sc->sc_dev), __func__));
   15469 
   15470 	switch (sc->sc_type) {
   15471 	case WM_T_ICH8:
   15472 	case WM_T_ICH9:
   15473 	case WM_T_ICH10:
   15474 	case WM_T_PCH:
   15475 	case WM_T_PCH2:
   15476 	case WM_T_PCH_LPT:
   15477 	case WM_T_PCH_SPT:
   15478 	case WM_T_PCH_CNP:
   15479 		do {
   15480 			reg = CSR_READ(sc, WMREG_FWSM);
   15481 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15482 				blocked = true;
   15483 				delay(10*1000);
   15484 				continue;
   15485 			}
   15486 			blocked = false;
   15487 		} while (blocked && (i++ < 30));
    15488 		return blocked;
   15490 	case WM_T_82571:
   15491 	case WM_T_82572:
   15492 	case WM_T_82573:
   15493 	case WM_T_82574:
   15494 	case WM_T_82583:
   15495 	case WM_T_80003:
    15496 		reg = CSR_READ(sc, WMREG_MANC);
    15497 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   15502 	default:
   15503 		/* No problem */
   15504 		break;
   15505 	}
   15506 
   15507 	return false;
   15508 }
   15509 
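          /*
           * wm_get_hw_control() and wm_release_hw_control() toggle the
           * DRV_LOAD bit, which tells any management firmware whether the
           * host driver currently owns the hardware.
           */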
   15510 static void
   15511 wm_get_hw_control(struct wm_softc *sc)
   15512 {
   15513 	uint32_t reg;
   15514 
   15515 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15516 		device_xname(sc->sc_dev), __func__));
   15517 
   15518 	if (sc->sc_type == WM_T_82573) {
   15519 		reg = CSR_READ(sc, WMREG_SWSM);
   15520 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15521 	} else if (sc->sc_type >= WM_T_82571) {
   15522 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15523 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15524 	}
   15525 }
   15526 
   15527 static void
   15528 wm_release_hw_control(struct wm_softc *sc)
   15529 {
   15530 	uint32_t reg;
   15531 
   15532 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15533 		device_xname(sc->sc_dev), __func__));
   15534 
   15535 	if (sc->sc_type == WM_T_82573) {
   15536 		reg = CSR_READ(sc, WMREG_SWSM);
   15537 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15538 	} else if (sc->sc_type >= WM_T_82571) {
   15539 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15540 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15541 	}
   15542 }
   15543 
   15544 static void
   15545 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15546 {
   15547 	uint32_t reg;
   15548 
   15549 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15550 		device_xname(sc->sc_dev), __func__));
   15551 
   15552 	if (sc->sc_type < WM_T_PCH2)
   15553 		return;
   15554 
   15555 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15556 
   15557 	if (gate)
   15558 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15559 	else
   15560 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15561 
   15562 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15563 }
   15564 
   15565 static int
   15566 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15567 {
   15568 	uint32_t fwsm, reg;
   15569 	int rv;
   15570 
   15571 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15572 		device_xname(sc->sc_dev), __func__));
   15573 
   15574 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15575 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15576 
   15577 	/* Disable ULP */
   15578 	wm_ulp_disable(sc);
   15579 
   15580 	/* Acquire PHY semaphore */
   15581 	rv = sc->phy.acquire(sc);
   15582 	if (rv != 0) {
   15583 		DPRINTF(sc, WM_DEBUG_INIT,
   15584 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15585 		return rv;
   15586 	}
   15587 
   15588 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15589 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15590 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15591 	 */
   15592 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15593 	switch (sc->sc_type) {
   15594 	case WM_T_PCH_LPT:
   15595 	case WM_T_PCH_SPT:
   15596 	case WM_T_PCH_CNP:
   15597 		if (wm_phy_is_accessible_pchlan(sc))
   15598 			break;
   15599 
   15600 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15601 		 * forcing MAC to SMBus mode first.
   15602 		 */
   15603 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15604 		reg |= CTRL_EXT_FORCE_SMBUS;
   15605 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15606 #if 0
   15607 		/* XXX Isn't this required??? */
   15608 		CSR_WRITE_FLUSH(sc);
   15609 #endif
   15610 		/* Wait 50 milliseconds for MAC to finish any retries
   15611 		 * that it might be trying to perform from previous
   15612 		 * attempts to acknowledge any phy read requests.
   15613 		 */
   15614 		delay(50 * 1000);
   15615 		/* FALLTHROUGH */
   15616 	case WM_T_PCH2:
    15617 		if (wm_phy_is_accessible_pchlan(sc))
   15618 			break;
   15619 		/* FALLTHROUGH */
   15620 	case WM_T_PCH:
   15621 		if (sc->sc_type == WM_T_PCH)
   15622 			if ((fwsm & FWSM_FW_VALID) != 0)
   15623 				break;
   15624 
    15625 		if (wm_phy_resetisblocked(sc)) {
   15626 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15627 			break;
   15628 		}
   15629 
   15630 		/* Toggle LANPHYPC Value bit */
   15631 		wm_toggle_lanphypc_pch_lpt(sc);
   15632 
   15633 		if (sc->sc_type >= WM_T_PCH_LPT) {
    15634 			if (wm_phy_is_accessible_pchlan(sc))
   15635 				break;
   15636 
   15637 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15638 			 * so ensure that the MAC is also out of SMBus mode
   15639 			 */
   15640 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15641 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15642 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15643 
    15644 			if (wm_phy_is_accessible_pchlan(sc))
   15645 				break;
   15646 			rv = -1;
   15647 		}
   15648 		break;
   15649 	default:
   15650 		break;
   15651 	}
   15652 
   15653 	/* Release semaphore */
   15654 	sc->phy.release(sc);
   15655 
   15656 	if (rv == 0) {
   15657 		/* Check to see if able to reset PHY.  Print error if not */
   15658 		if (wm_phy_resetisblocked(sc)) {
   15659 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15660 			goto out;
   15661 		}
   15662 
   15663 		/* Reset the PHY before any access to it.  Doing so, ensures
   15664 		 * that the PHY is in a known good state before we read/write
   15665 		 * PHY registers.  The generic reset is sufficient here,
   15666 		 * because we haven't determined the PHY type yet.
   15667 		 */
   15668 		if (wm_reset_phy(sc) != 0)
   15669 			goto out;
   15670 
   15671 		/* On a successful reset, possibly need to wait for the PHY
   15672 		 * to quiesce to an accessible state before returning control
   15673 		 * to the calling function.  If the PHY does not quiesce, then
    15674 		 * warn that the reset is blocked, as this is the condition
    15675 		 * the PHY is in.
   15676 		 */
   15677 		if (wm_phy_resetisblocked(sc))
   15678 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15679 	}
   15680 
   15681 out:
   15682 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15683 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15684 		delay(10*1000);
   15685 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15686 	}
   15687 
   15688 	return 0;
   15689 }
   15690 
   15691 static void
   15692 wm_init_manageability(struct wm_softc *sc)
   15693 {
   15694 
   15695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15696 		device_xname(sc->sc_dev), __func__));
   15697 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15698 
   15699 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15700 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15701 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15702 
   15703 		/* Disable hardware interception of ARP */
   15704 		manc &= ~MANC_ARP_EN;
   15705 
   15706 		/* Enable receiving management packets to the host */
   15707 		if (sc->sc_type >= WM_T_82571) {
   15708 			manc |= MANC_EN_MNG2HOST;
   15709 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15710 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15711 		}
   15712 
   15713 		CSR_WRITE(sc, WMREG_MANC, manc);
   15714 	}
   15715 }
   15716 
   15717 static void
   15718 wm_release_manageability(struct wm_softc *sc)
   15719 {
   15720 
   15721 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15722 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15723 
   15724 		manc |= MANC_ARP_EN;
   15725 		if (sc->sc_type >= WM_T_82571)
   15726 			manc &= ~MANC_EN_MNG2HOST;
   15727 
   15728 		CSR_WRITE(sc, WMREG_MANC, manc);
   15729 	}
   15730 }
   15731 
   15732 static void
   15733 wm_get_wakeup(struct wm_softc *sc)
   15734 {
   15735 
   15736 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15737 	switch (sc->sc_type) {
   15738 	case WM_T_82573:
   15739 	case WM_T_82583:
   15740 		sc->sc_flags |= WM_F_HAS_AMT;
   15741 		/* FALLTHROUGH */
   15742 	case WM_T_80003:
   15743 	case WM_T_82575:
   15744 	case WM_T_82576:
   15745 	case WM_T_82580:
   15746 	case WM_T_I350:
   15747 	case WM_T_I354:
   15748 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15749 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15750 		/* FALLTHROUGH */
   15751 	case WM_T_82541:
   15752 	case WM_T_82541_2:
   15753 	case WM_T_82547:
   15754 	case WM_T_82547_2:
   15755 	case WM_T_82571:
   15756 	case WM_T_82572:
   15757 	case WM_T_82574:
   15758 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15759 		break;
   15760 	case WM_T_ICH8:
   15761 	case WM_T_ICH9:
   15762 	case WM_T_ICH10:
   15763 	case WM_T_PCH:
   15764 	case WM_T_PCH2:
   15765 	case WM_T_PCH_LPT:
   15766 	case WM_T_PCH_SPT:
   15767 	case WM_T_PCH_CNP:
   15768 		sc->sc_flags |= WM_F_HAS_AMT;
   15769 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15770 		break;
   15771 	default:
   15772 		break;
   15773 	}
   15774 
   15775 	/* 1: HAS_MANAGE */
   15776 	if (wm_enable_mng_pass_thru(sc) != 0)
   15777 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15778 
   15779 	/*
    15780 	 * Note that the WOL flag is set after the EEPROM-related reset
    15781 	 * is done.
   15782 	 */
   15783 }
   15784 
   15785 /*
   15786  * Unconfigure Ultra Low Power mode.
   15787  * Only for I217 and newer (see below).
   15788  */
   15789 static int
   15790 wm_ulp_disable(struct wm_softc *sc)
   15791 {
   15792 	uint32_t reg;
   15793 	uint16_t phyreg;
   15794 	int i = 0, rv;
   15795 
   15796 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15797 		device_xname(sc->sc_dev), __func__));
   15798 	/* Exclude old devices */
   15799 	if ((sc->sc_type < WM_T_PCH_LPT)
   15800 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15801 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15802 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15803 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15804 		return 0;
   15805 
   15806 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    15807 		/* Request that the ME un-configure ULP mode in the PHY */
   15808 		reg = CSR_READ(sc, WMREG_H2ME);
   15809 		reg &= ~H2ME_ULP;
   15810 		reg |= H2ME_ENFORCE_SETTINGS;
   15811 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15812 
   15813 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15814 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15815 			if (i++ == 30) {
   15816 				device_printf(sc->sc_dev, "%s timed out\n",
   15817 				    __func__);
   15818 				return -1;
   15819 			}
   15820 			delay(10 * 1000);
   15821 		}
   15822 		reg = CSR_READ(sc, WMREG_H2ME);
   15823 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15824 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15825 
   15826 		return 0;
   15827 	}
   15828 
   15829 	/* Acquire semaphore */
   15830 	rv = sc->phy.acquire(sc);
   15831 	if (rv != 0) {
   15832 		DPRINTF(sc, WM_DEBUG_INIT,
   15833 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15834 		return rv;
   15835 	}
   15836 
   15837 	/* Toggle LANPHYPC */
   15838 	wm_toggle_lanphypc_pch_lpt(sc);
   15839 
   15840 	/* Unforce SMBus mode in PHY */
   15841 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15842 	if (rv != 0) {
   15843 		uint32_t reg2;
   15844 
   15845 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15846 		    __func__);
   15847 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15848 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15849 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15850 		delay(50 * 1000);
   15851 
   15852 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15853 		    &phyreg);
   15854 		if (rv != 0)
   15855 			goto release;
   15856 	}
   15857 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15858 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15859 
   15860 	/* Unforce SMBus mode in MAC */
   15861 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15862 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15863 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15864 
   15865 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15866 	if (rv != 0)
   15867 		goto release;
   15868 	phyreg |= HV_PM_CTRL_K1_ENA;
   15869 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15870 
   15871 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15872 	    &phyreg);
   15873 	if (rv != 0)
   15874 		goto release;
   15875 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15876 	    | I218_ULP_CONFIG1_STICKY_ULP
   15877 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15878 	    | I218_ULP_CONFIG1_WOL_HOST
   15879 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15880 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15881 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15882 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15883 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15884 	phyreg |= I218_ULP_CONFIG1_START;
   15885 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15886 
   15887 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15888 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15889 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15890 
   15891 release:
   15892 	/* Release semaphore */
   15893 	sc->phy.release(sc);
   15894 	wm_gmii_reset(sc);
   15895 	delay(50 * 1000);
   15896 
   15897 	return rv;
   15898 }
   15899 
   15900 /* WOL in the newer chipset interfaces (pchlan) */
   15901 static int
   15902 wm_enable_phy_wakeup(struct wm_softc *sc)
   15903 {
   15904 	device_t dev = sc->sc_dev;
   15905 	uint32_t mreg, moff;
   15906 	uint16_t wuce, wuc, wufc, preg;
   15907 	int i, rv;
   15908 
   15909 	KASSERT(sc->sc_type >= WM_T_PCH);
   15910 
   15911 	/* Copy MAC RARs to PHY RARs */
   15912 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15913 
   15914 	/* Activate PHY wakeup */
   15915 	rv = sc->phy.acquire(sc);
   15916 	if (rv != 0) {
   15917 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15918 		    __func__);
   15919 		return rv;
   15920 	}
   15921 
   15922 	/*
   15923 	 * Enable access to PHY wakeup registers.
   15924 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15925 	 */
   15926 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15927 	if (rv != 0) {
   15928 		device_printf(dev,
   15929 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15930 		goto release;
   15931 	}
   15932 
   15933 	/* Copy MAC MTA to PHY MTA */
   15934 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15935 		uint16_t lo, hi;
   15936 
   15937 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15938 		lo = (uint16_t)(mreg & 0xffff);
   15939 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15940 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15941 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15942 	}
   15943 
   15944 	/* Configure PHY Rx Control register */
   15945 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15946 	mreg = CSR_READ(sc, WMREG_RCTL);
   15947 	if (mreg & RCTL_UPE)
   15948 		preg |= BM_RCTL_UPE;
   15949 	if (mreg & RCTL_MPE)
   15950 		preg |= BM_RCTL_MPE;
   15951 	preg &= ~(BM_RCTL_MO_MASK);
   15952 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15953 	if (moff != 0)
   15954 		preg |= moff << BM_RCTL_MO_SHIFT;
   15955 	if (mreg & RCTL_BAM)
   15956 		preg |= BM_RCTL_BAM;
   15957 	if (mreg & RCTL_PMCF)
   15958 		preg |= BM_RCTL_PMCF;
   15959 	mreg = CSR_READ(sc, WMREG_CTRL);
   15960 	if (mreg & CTRL_RFCE)
   15961 		preg |= BM_RCTL_RFCE;
   15962 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15963 
   15964 	wuc = WUC_APME | WUC_PME_EN;
   15965 	wufc = WUFC_MAG;
   15966 	/* Enable PHY wakeup in MAC register */
   15967 	CSR_WRITE(sc, WMREG_WUC,
   15968 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15969 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15970 
   15971 	/* Configure and enable PHY wakeup in PHY registers */
   15972 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15973 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15974 
   15975 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15976 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15977 
   15978 release:
   15979 	sc->phy.release(sc);
   15980 
    15981 	return rv;
   15982 }
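
/*
 * Aside: the BM_* wakeup registers used above (BM_MTA, BM_RCTL, BM_WUFC,
 * BM_WUC) all live in BM_WUC_PAGE and are only reachable while access is
 * explicitly enabled.  A minimal sketch of the bracket, using the calls as
 * they appear in wm_enable_phy_wakeup() (the read/write sense of the 4th
 * argument is inferred from those calls):
 */
#if 0
	uint16_t wuce, val = 0;

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
		wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &val, 1, true);
		wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	}
#endif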
   15983 
   15984 /* Power down workaround on D3 */
   15985 static void
   15986 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15987 {
   15988 	uint32_t reg;
   15989 	uint16_t phyreg;
   15990 	int i;
   15991 
   15992 	for (i = 0; i < 2; i++) {
   15993 		/* Disable link */
   15994 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15995 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15996 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15997 
   15998 		/*
   15999 		 * Call gig speed drop workaround on Gig disable before
   16000 		 * accessing any PHY registers
   16001 		 */
   16002 		if (sc->sc_type == WM_T_ICH8)
   16003 			wm_gig_downshift_workaround_ich8lan(sc);
   16004 
   16005 		/* Write VR power-down enable */
   16006 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16007 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16008 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16009 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16010 
   16011 		/* Read it back and test */
   16012 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16013 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16014 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16015 			break;
   16016 
   16017 		/* Issue PHY reset and repeat at most one more time */
   16018 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16019 	}
   16020 }
   16021 
   16022 /*
   16023  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16024  *  @sc: pointer to the HW structure
   16025  *
   16026  *  During S0 to Sx transition, it is possible the link remains at gig
   16027  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16028  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16029  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16030  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16031  *  needs to be written.
    16032  *  Parts that support (and are linked to a partner which supports) EEE in
   16033  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16034  *  than 10Mbps w/o EEE.
   16035  */
   16036 static void
   16037 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16038 {
   16039 	device_t dev = sc->sc_dev;
   16040 	struct ethercom *ec = &sc->sc_ethercom;
   16041 	uint32_t phy_ctrl;
   16042 	int rv;
   16043 
   16044 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16045 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16046 
   16047 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16048 
   16049 	if (sc->sc_phytype == WMPHY_I217) {
   16050 		uint16_t devid = sc->sc_pcidevid;
   16051 
   16052 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16053 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16054 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16055 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16056 		    (sc->sc_type >= WM_T_PCH_SPT))
   16057 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16058 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16059 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16060 
   16061 		if (sc->phy.acquire(sc) != 0)
   16062 			goto out;
   16063 
   16064 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16065 			uint16_t eee_advert;
   16066 
   16067 			rv = wm_read_emi_reg_locked(dev,
   16068 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16069 			if (rv)
   16070 				goto release;
   16071 
   16072 			/*
   16073 			 * Disable LPLU if both link partners support 100BaseT
   16074 			 * EEE and 100Full is advertised on both ends of the
   16075 			 * link, and enable Auto Enable LPI since there will
   16076 			 * be no driver to enable LPI while in Sx.
   16077 			 */
   16078 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16079 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16080 				uint16_t anar, phy_reg;
   16081 
   16082 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16083 				    &anar);
   16084 				if (anar & ANAR_TX_FD) {
   16085 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16086 					    PHY_CTRL_NOND0A_LPLU);
   16087 
   16088 					/* Set Auto Enable LPI after link up */
   16089 					sc->phy.readreg_locked(dev, 2,
   16090 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16091 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16092 					sc->phy.writereg_locked(dev, 2,
   16093 					    I217_LPI_GPIO_CTRL, phy_reg);
   16094 				}
   16095 			}
   16096 		}
   16097 
   16098 		/*
   16099 		 * For i217 Intel Rapid Start Technology support,
   16100 		 * when the system is going into Sx and no manageability engine
   16101 		 * is present, the driver must configure proxy to reset only on
    16102 		 * power good.  The LPI (Low Power Idle) state and the MTA
    16103 		 * (Multicast table array) must likewise reset only on power good.
   16104 		 * The SMBus release must also be disabled on LCD reset.
   16105 		 */
   16106 
   16107 		/*
   16108 		 * Enable MTA to reset for Intel Rapid Start Technology
   16109 		 * Support
   16110 		 */
   16111 
   16112 release:
   16113 		sc->phy.release(sc);
   16114 	}
   16115 out:
   16116 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16117 
   16118 	if (sc->sc_type == WM_T_ICH8)
   16119 		wm_gig_downshift_workaround_ich8lan(sc);
   16120 
   16121 	if (sc->sc_type >= WM_T_PCH) {
   16122 		wm_oem_bits_config_ich8lan(sc, false);
   16123 
   16124 		/* Reset PHY to activate OEM bits on 82577/8 */
   16125 		if (sc->sc_type == WM_T_PCH)
   16126 			wm_reset_phy(sc);
   16127 
   16128 		if (sc->phy.acquire(sc) != 0)
   16129 			return;
   16130 		wm_write_smbus_addr(sc);
   16131 		sc->phy.release(sc);
   16132 	}
   16133 }
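
/*
 * Aside: the LPLU decision in wm_suspend_workarounds_ich8lan() boils down
 * to a three-way condition: we advertise 100TX EEE, the link partner
 * advertises 100TX EEE, and 100Full is advertised locally (ANAR_TX_FD).
 * Only then is staying at 100Mbps with EEE cheaper in Sx than dropping to
 * 10Mbps without it, per the function's header comment.
 */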
   16134 
   16135 /*
   16136  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16137  *  @sc: pointer to the HW structure
   16138  *
   16139  *  During Sx to S0 transitions on non-managed devices or managed devices
   16140  *  on which PHY resets are not blocked, if the PHY registers cannot be
    16141  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16142  *  the PHY.
   16143  *  On i217, setup Intel Rapid Start Technology.
   16144  */
   16145 static int
   16146 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16147 {
   16148 	device_t dev = sc->sc_dev;
   16149 	int rv;
   16150 
   16151 	if (sc->sc_type < WM_T_PCH2)
   16152 		return 0;
   16153 
   16154 	rv = wm_init_phy_workarounds_pchlan(sc);
   16155 	if (rv != 0)
   16156 		return rv;
   16157 
    16158 	/* For i217 Intel Rapid Start Technology support, when the system
    16159 	 * is transitioning from Sx and no manageability engine is present,
    16160 	 * configure SMBus to restore on reset, disable proxy, and enable
   16161 	 * the reset on MTA (Multicast table array).
   16162 	 */
   16163 	if (sc->sc_phytype == WMPHY_I217) {
   16164 		uint16_t phy_reg;
   16165 
   16166 		rv = sc->phy.acquire(sc);
   16167 		if (rv != 0)
   16168 			return rv;
   16169 
   16170 		/* Clear Auto Enable LPI after link up */
   16171 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16172 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16173 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16174 
   16175 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16176 			/* Restore clear on SMB if no manageability engine
   16177 			 * is present
   16178 			 */
   16179 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16180 			    &phy_reg);
   16181 			if (rv != 0)
   16182 				goto release;
   16183 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16184 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16185 
   16186 			/* Disable Proxy */
   16187 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16188 		}
   16189 		/* Enable reset on MTA */
    16190 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16191 		if (rv != 0)
   16192 			goto release;
   16193 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16194 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16195 
   16196 release:
   16197 		sc->phy.release(sc);
   16198 		return rv;
   16199 	}
   16200 
   16201 	return 0;
   16202 }
   16203 
   16204 static void
   16205 wm_enable_wakeup(struct wm_softc *sc)
   16206 {
   16207 	uint32_t reg, pmreg;
   16208 	pcireg_t pmode;
   16209 	int rv = 0;
   16210 
   16211 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16212 		device_xname(sc->sc_dev), __func__));
   16213 
   16214 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16215 	    &pmreg, NULL) == 0)
   16216 		return;
   16217 
   16218 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16219 		goto pme;
   16220 
   16221 	/* Advertise the wakeup capability */
   16222 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16223 	    | CTRL_SWDPIN(3));
   16224 
   16225 	/* Keep the laser running on fiber adapters */
   16226 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16227 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16228 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16229 		reg |= CTRL_EXT_SWDPIN(3);
   16230 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16231 	}
   16232 
   16233 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16234 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16235 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16236 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16237 		wm_suspend_workarounds_ich8lan(sc);
   16238 
   16239 #if 0	/* For the multicast packet */
   16240 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16241 	reg |= WUFC_MC;
   16242 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16243 #endif
   16244 
   16245 	if (sc->sc_type >= WM_T_PCH) {
   16246 		rv = wm_enable_phy_wakeup(sc);
   16247 		if (rv != 0)
   16248 			goto pme;
   16249 	} else {
   16250 		/* Enable wakeup by the MAC */
   16251 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16252 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16253 	}
   16254 
   16255 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16256 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16257 		|| (sc->sc_type == WM_T_PCH2))
   16258 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16259 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16260 
   16261 pme:
   16262 	/* Request PME */
   16263 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16264 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16265 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16266 		/* For WOL */
   16267 		pmode |= PCI_PMCSR_PME_EN;
   16268 	} else {
   16269 		/* Disable WOL */
   16270 		pmode &= ~PCI_PMCSR_PME_EN;
   16271 	}
   16272 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16273 }
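
/*
 * Aside: PCI_PMCSR_PME_STS is write-one-to-clear, so OR-ing it into the
 * value written back clears any stale PME status instead of preserving it.
 * A minimal sketch of the idiom used above:
 */
#if 0
	pcireg_t pmode;

	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
	pmode |= PCI_PMCSR_PME_STS;	/* W1C: writing 1 clears it */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
#endif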
   16274 
   16275 /* Disable ASPM L0s and/or L1 for workaround */
   16276 static void
   16277 wm_disable_aspm(struct wm_softc *sc)
   16278 {
   16279 	pcireg_t reg, mask = 0;
    16280 	const char *str = "";
   16281 
   16282 	/*
    16283 	 * Only for PCIe devices which have the PCIe capability in the PCI
    16284 	 * config space.
   16285 	 */
   16286 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16287 		return;
   16288 
   16289 	switch (sc->sc_type) {
   16290 	case WM_T_82571:
   16291 	case WM_T_82572:
   16292 		/*
   16293 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16294 		 * State Power management L1 State (ASPM L1).
   16295 		 */
   16296 		mask = PCIE_LCSR_ASPM_L1;
   16297 		str = "L1 is";
   16298 		break;
   16299 	case WM_T_82573:
   16300 	case WM_T_82574:
   16301 	case WM_T_82583:
   16302 		/*
   16303 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16304 		 *
    16305 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16306 		 * some chipsets.  The documents for the 82574 and 82583 say
    16307 		 * that disabling L0s with those specific chipsets is
    16308 		 * sufficient, but we follow what the Intel em driver does.
   16309 		 *
   16310 		 * References:
   16311 		 * Errata 8 of the Specification Update of i82573.
   16312 		 * Errata 20 of the Specification Update of i82574.
   16313 		 * Errata 9 of the Specification Update of i82583.
   16314 		 */
   16315 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16316 		str = "L0s and L1 are";
   16317 		break;
   16318 	default:
   16319 		return;
   16320 	}
   16321 
   16322 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16323 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16324 	reg &= ~mask;
   16325 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16326 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16327 
   16328 	/* Print only in wm_attach() */
   16329 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16330 		aprint_verbose_dev(sc->sc_dev,
   16331 		    "ASPM %s disabled to workaround the errata.\n", str);
   16332 }
   16333 
   16334 /* LPLU */
   16335 
   16336 static void
   16337 wm_lplu_d0_disable(struct wm_softc *sc)
   16338 {
   16339 	struct mii_data *mii = &sc->sc_mii;
   16340 	uint32_t reg;
   16341 	uint16_t phyval;
   16342 
   16343 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16344 		device_xname(sc->sc_dev), __func__));
   16345 
   16346 	if (sc->sc_phytype == WMPHY_IFE)
   16347 		return;
   16348 
   16349 	switch (sc->sc_type) {
   16350 	case WM_T_82571:
   16351 	case WM_T_82572:
   16352 	case WM_T_82573:
   16353 	case WM_T_82575:
   16354 	case WM_T_82576:
   16355 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16356 		phyval &= ~PMR_D0_LPLU;
   16357 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16358 		break;
   16359 	case WM_T_82580:
   16360 	case WM_T_I350:
   16361 	case WM_T_I210:
   16362 	case WM_T_I211:
   16363 		reg = CSR_READ(sc, WMREG_PHPM);
   16364 		reg &= ~PHPM_D0A_LPLU;
   16365 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16366 		break;
   16367 	case WM_T_82574:
   16368 	case WM_T_82583:
   16369 	case WM_T_ICH8:
   16370 	case WM_T_ICH9:
   16371 	case WM_T_ICH10:
   16372 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16373 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16374 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16375 		CSR_WRITE_FLUSH(sc);
   16376 		break;
   16377 	case WM_T_PCH:
   16378 	case WM_T_PCH2:
   16379 	case WM_T_PCH_LPT:
   16380 	case WM_T_PCH_SPT:
   16381 	case WM_T_PCH_CNP:
   16382 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16383 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16384 		if (wm_phy_resetisblocked(sc) == false)
   16385 			phyval |= HV_OEM_BITS_ANEGNOW;
   16386 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16387 		break;
   16388 	default:
   16389 		break;
   16390 	}
   16391 }
   16392 
   16393 /* EEE */
   16394 
   16395 static int
   16396 wm_set_eee_i350(struct wm_softc *sc)
   16397 {
   16398 	struct ethercom *ec = &sc->sc_ethercom;
   16399 	uint32_t ipcnfg, eeer;
   16400 	uint32_t ipcnfg_mask
   16401 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16402 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16403 
   16404 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16405 
   16406 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16407 	eeer = CSR_READ(sc, WMREG_EEER);
   16408 
   16409 	/* Enable or disable per user setting */
   16410 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16411 		ipcnfg |= ipcnfg_mask;
   16412 		eeer |= eeer_mask;
   16413 	} else {
   16414 		ipcnfg &= ~ipcnfg_mask;
   16415 		eeer &= ~eeer_mask;
   16416 	}
   16417 
   16418 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16419 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16420 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16421 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16422 
   16423 	return 0;
   16424 }
   16425 
   16426 static int
   16427 wm_set_eee_pchlan(struct wm_softc *sc)
   16428 {
   16429 	device_t dev = sc->sc_dev;
   16430 	struct ethercom *ec = &sc->sc_ethercom;
   16431 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16432 	int rv;
   16433 
   16434 	switch (sc->sc_phytype) {
   16435 	case WMPHY_82579:
   16436 		lpa = I82579_EEE_LP_ABILITY;
   16437 		pcs_status = I82579_EEE_PCS_STATUS;
   16438 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16439 		break;
   16440 	case WMPHY_I217:
   16441 		lpa = I217_EEE_LP_ABILITY;
   16442 		pcs_status = I217_EEE_PCS_STATUS;
   16443 		adv_addr = I217_EEE_ADVERTISEMENT;
   16444 		break;
   16445 	default:
   16446 		return 0;
   16447 	}
   16448 
   16449 	rv = sc->phy.acquire(sc);
   16450 	if (rv != 0) {
   16451 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16452 		return rv;
   16453 	}
   16454 
   16455 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16456 	if (rv != 0)
   16457 		goto release;
   16458 
   16459 	/* Clear bits that enable EEE in various speeds */
   16460 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16461 
   16462 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16463 		/* Save off link partner's EEE ability */
   16464 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16465 		if (rv != 0)
   16466 			goto release;
   16467 
   16468 		/* Read EEE advertisement */
   16469 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16470 			goto release;
   16471 
   16472 		/*
   16473 		 * Enable EEE only for speeds in which the link partner is
   16474 		 * EEE capable and for which we advertise EEE.
   16475 		 */
   16476 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16477 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16478 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16479 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16480 			if ((data & ANLPAR_TX_FD) != 0)
   16481 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16482 			else {
   16483 				/*
   16484 				 * EEE is not supported in 100Half, so ignore
   16485 				 * partner's EEE in 100 ability if full-duplex
   16486 				 * is not advertised.
   16487 				 */
   16488 				sc->eee_lp_ability
   16489 				    &= ~AN_EEEADVERT_100_TX;
   16490 			}
   16491 		}
   16492 	}
   16493 
   16494 	if (sc->sc_phytype == WMPHY_82579) {
   16495 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16496 		if (rv != 0)
   16497 			goto release;
   16498 
   16499 		data &= ~I82579_LPI_PLL_SHUT_100;
   16500 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16501 	}
   16502 
   16503 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16504 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16505 		goto release;
   16506 
   16507 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16508 release:
   16509 	sc->phy.release(sc);
   16510 
   16511 	return rv;
   16512 }
   16513 
   16514 static int
   16515 wm_set_eee(struct wm_softc *sc)
   16516 {
   16517 	struct ethercom *ec = &sc->sc_ethercom;
   16518 
   16519 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16520 		return 0;
   16521 
   16522 	if (sc->sc_type == WM_T_I354) {
   16523 		/* I354 uses an external PHY */
   16524 		return 0; /* not yet */
   16525 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16526 		return wm_set_eee_i350(sc);
   16527 	else if (sc->sc_type >= WM_T_PCH2)
   16528 		return wm_set_eee_pchlan(sc);
   16529 
   16530 	return 0;
   16531 }
   16532 
   16533 /*
   16534  * Workarounds (mainly PHY related).
   16535  * Basically, PHY's workarounds are in the PHY drivers.
   16536  */
   16537 
   16538 /* Workaround for 82566 Kumeran PCS lock loss */
   16539 static int
   16540 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16541 {
   16542 	struct mii_data *mii = &sc->sc_mii;
   16543 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16544 	int i, reg, rv;
   16545 	uint16_t phyreg;
   16546 
   16547 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16548 		device_xname(sc->sc_dev), __func__));
   16549 
   16550 	/* If the link is not up, do nothing */
   16551 	if ((status & STATUS_LU) == 0)
   16552 		return 0;
   16553 
   16554 	/* Nothing to do if the link is other than 1Gbps */
   16555 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16556 		return 0;
   16557 
   16558 	for (i = 0; i < 10; i++) {
   16559 		/* read twice */
   16560 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16561 		if (rv != 0)
   16562 			return rv;
   16563 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16564 		if (rv != 0)
   16565 			return rv;
   16566 
   16567 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16568 			goto out;	/* GOOD! */
   16569 
   16570 		/* Reset the PHY */
   16571 		wm_reset_phy(sc);
   16572 		delay(5*1000);
   16573 	}
   16574 
   16575 	/* Disable GigE link negotiation */
   16576 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16577 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16578 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16579 
   16580 	/*
   16581 	 * Call gig speed drop workaround on Gig disable before accessing
   16582 	 * any PHY registers.
   16583 	 */
   16584 	wm_gig_downshift_workaround_ich8lan(sc);
   16585 
   16586 out:
   16587 	return 0;
   16588 }
   16589 
   16590 /*
   16591  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16592  *  @sc: pointer to the HW structure
   16593  *
    16594  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16595  *  LPLU, Gig disable, MDIC PHY reset):
   16596  *    1) Set Kumeran Near-end loopback
   16597  *    2) Clear Kumeran Near-end loopback
   16598  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16599  */
   16600 static void
   16601 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16602 {
   16603 	uint16_t kmreg;
   16604 
   16605 	/* Only for igp3 */
   16606 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16607 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16608 			return;
   16609 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16610 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16611 			return;
   16612 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16613 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16614 	}
   16615 }
   16616 
   16617 /*
   16618  * Workaround for pch's PHYs
   16619  * XXX should be moved to new PHY driver?
   16620  */
   16621 static int
   16622 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16623 {
   16624 	device_t dev = sc->sc_dev;
   16625 	struct mii_data *mii = &sc->sc_mii;
   16626 	struct mii_softc *child;
   16627 	uint16_t phy_data, phyrev = 0;
   16628 	int phytype = sc->sc_phytype;
   16629 	int rv;
   16630 
   16631 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16632 		device_xname(dev), __func__));
   16633 	KASSERT(sc->sc_type == WM_T_PCH);
   16634 
   16635 	/* Set MDIO slow mode before any other MDIO access */
   16636 	if (phytype == WMPHY_82577)
   16637 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16638 			return rv;
   16639 
   16640 	child = LIST_FIRST(&mii->mii_phys);
   16641 	if (child != NULL)
   16642 		phyrev = child->mii_mpd_rev;
   16643 
    16644 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16645 	if ((child != NULL) &&
   16646 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16647 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16648 		/* Disable generation of early preamble (0x4431) */
   16649 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16650 		    &phy_data);
   16651 		if (rv != 0)
   16652 			return rv;
   16653 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16654 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16655 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16656 		    phy_data);
   16657 		if (rv != 0)
   16658 			return rv;
   16659 
   16660 		/* Preamble tuning for SSC */
   16661 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16662 		if (rv != 0)
   16663 			return rv;
   16664 	}
   16665 
   16666 	/* 82578 */
   16667 	if (phytype == WMPHY_82578) {
   16668 		/*
   16669 		 * Return registers to default by doing a soft reset then
   16670 		 * writing 0x3140 to the control register
   16671 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16672 		 */
   16673 		if ((child != NULL) && (phyrev < 2)) {
   16674 			PHY_RESET(child);
   16675 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16676 			if (rv != 0)
   16677 				return rv;
   16678 		}
   16679 	}
   16680 
   16681 	/* Select page 0 */
   16682 	if ((rv = sc->phy.acquire(sc)) != 0)
   16683 		return rv;
   16684 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16685 	sc->phy.release(sc);
   16686 	if (rv != 0)
   16687 		return rv;
   16688 
   16689 	/*
   16690 	 * Configure the K1 Si workaround during phy reset assuming there is
   16691 	 * link so that it disables K1 if link is in 1Gbps.
   16692 	 */
   16693 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16694 		return rv;
   16695 
   16696 	/* Workaround for link disconnects on a busy hub in half duplex */
   16697 	rv = sc->phy.acquire(sc);
   16698 	if (rv)
   16699 		return rv;
   16700 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16701 	if (rv)
   16702 		goto release;
   16703 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16704 	    phy_data & 0x00ff);
   16705 	if (rv)
   16706 		goto release;
   16707 
   16708 	/* Set MSE higher to enable link to stay up when noise is high */
   16709 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16710 release:
   16711 	sc->phy.release(sc);
   16712 
   16713 	return rv;
   16714 }
   16715 
   16716 /*
   16717  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16718  *  @sc:   pointer to the HW structure
   16719  */
   16720 static void
   16721 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16722 {
   16723 
   16724 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16725 		device_xname(sc->sc_dev), __func__));
   16726 
   16727 	if (sc->phy.acquire(sc) != 0)
   16728 		return;
   16729 
   16730 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16731 
   16732 	sc->phy.release(sc);
   16733 }
   16734 
   16735 static void
   16736 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16737 {
   16738 	device_t dev = sc->sc_dev;
   16739 	uint32_t mac_reg;
   16740 	uint16_t i, wuce;
   16741 	int count;
   16742 
   16743 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16744 		device_xname(dev), __func__));
   16745 
   16746 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16747 		return;
   16748 
   16749 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16750 	count = wm_rar_count(sc);
   16751 	for (i = 0; i < count; i++) {
   16752 		uint16_t lo, hi;
   16753 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16754 		lo = (uint16_t)(mac_reg & 0xffff);
   16755 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16756 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16757 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16758 
   16759 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16760 		lo = (uint16_t)(mac_reg & 0xffff);
   16761 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16762 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16763 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16764 	}
   16765 
   16766 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16767 }
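
/*
 * Aside: each 32-bit MAC RAL/RAH register above is split into two 16-bit
 * PHY wakeup registers, and for RAH only the address-valid bit (RAL_AV)
 * is carried across into BM_RAR_CTRL.
 */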
   16768 
   16769 /*
   16770  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16771  *  with 82579 PHY
   16772  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16773  */
   16774 static int
   16775 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16776 {
   16777 	device_t dev = sc->sc_dev;
   16778 	int rar_count;
   16779 	int rv;
   16780 	uint32_t mac_reg;
   16781 	uint16_t dft_ctrl, data;
   16782 	uint16_t i;
   16783 
   16784 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16785 		device_xname(dev), __func__));
   16786 
   16787 	if (sc->sc_type < WM_T_PCH2)
   16788 		return 0;
   16789 
   16790 	/* Acquire PHY semaphore */
   16791 	rv = sc->phy.acquire(sc);
   16792 	if (rv != 0)
   16793 		return rv;
   16794 
   16795 	/* Disable Rx path while enabling/disabling workaround */
   16796 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16797 	if (rv != 0)
   16798 		goto out;
   16799 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16800 	    dft_ctrl | (1 << 14));
   16801 	if (rv != 0)
   16802 		goto out;
   16803 
   16804 	if (enable) {
   16805 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16806 		 * SHRAL/H) and initial CRC values to the MAC
   16807 		 */
   16808 		rar_count = wm_rar_count(sc);
   16809 		for (i = 0; i < rar_count; i++) {
   16810 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16811 			uint32_t addr_high, addr_low;
   16812 
   16813 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16814 			if (!(addr_high & RAL_AV))
   16815 				continue;
   16816 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16817 			mac_addr[0] = (addr_low & 0xFF);
   16818 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16819 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16820 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16821 			mac_addr[4] = (addr_high & 0xFF);
   16822 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16823 
   16824 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16825 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16826 		}
   16827 
   16828 		/* Write Rx addresses to the PHY */
   16829 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16830 	}
   16831 
   16832 	/*
   16833 	 * If enable ==
   16834 	 *	true: Enable jumbo frame workaround in the MAC.
   16835 	 *	false: Write MAC register values back to h/w defaults.
   16836 	 */
   16837 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16838 	if (enable) {
   16839 		mac_reg &= ~(1 << 14);
   16840 		mac_reg |= (7 << 15);
   16841 	} else
   16842 		mac_reg &= ~(0xf << 14);
   16843 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16844 
   16845 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16846 	if (enable) {
   16847 		mac_reg |= RCTL_SECRC;
   16848 		sc->sc_rctl |= RCTL_SECRC;
   16849 		sc->sc_flags |= WM_F_CRC_STRIP;
   16850 	} else {
   16851 		mac_reg &= ~RCTL_SECRC;
   16852 		sc->sc_rctl &= ~RCTL_SECRC;
   16853 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16854 	}
   16855 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16856 
   16857 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16858 	if (rv != 0)
   16859 		goto out;
   16860 	if (enable)
   16861 		data |= 1 << 0;
   16862 	else
   16863 		data &= ~(1 << 0);
   16864 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16865 	if (rv != 0)
   16866 		goto out;
   16867 
   16868 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16869 	if (rv != 0)
   16870 		goto out;
   16871 	/*
    16872 	 * XXX FreeBSD and Linux both set the same value here in the enable
    16873 	 * case and the disable case. Is that correct?
   16874 	 */
   16875 	data &= ~(0xf << 8);
   16876 	data |= (0xb << 8);
   16877 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16878 	if (rv != 0)
   16879 		goto out;
   16880 
   16881 	/*
   16882 	 * If enable ==
   16883 	 *	true: Enable jumbo frame workaround in the PHY.
   16884 	 *	false: Write PHY register values back to h/w defaults.
   16885 	 */
   16886 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16887 	if (rv != 0)
   16888 		goto out;
   16889 	data &= ~(0x7F << 5);
   16890 	if (enable)
   16891 		data |= (0x37 << 5);
   16892 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16893 	if (rv != 0)
   16894 		goto out;
   16895 
   16896 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16897 	if (rv != 0)
   16898 		goto out;
   16899 	if (enable)
   16900 		data &= ~(1 << 13);
   16901 	else
   16902 		data |= (1 << 13);
   16903 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16904 	if (rv != 0)
   16905 		goto out;
   16906 
   16907 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16908 	if (rv != 0)
   16909 		goto out;
   16910 	data &= ~(0x3FF << 2);
   16911 	if (enable)
   16912 		data |= (I82579_TX_PTR_GAP << 2);
   16913 	else
   16914 		data |= (0x8 << 2);
   16915 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16916 	if (rv != 0)
   16917 		goto out;
   16918 
   16919 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16920 	    enable ? 0xf100 : 0x7e00);
   16921 	if (rv != 0)
   16922 		goto out;
   16923 
   16924 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16925 	if (rv != 0)
   16926 		goto out;
   16927 	if (enable)
   16928 		data |= 1 << 10;
   16929 	else
   16930 		data &= ~(1 << 10);
   16931 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16932 	if (rv != 0)
   16933 		goto out;
   16934 
   16935 	/* Re-enable Rx path after enabling/disabling workaround */
   16936 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16937 	    dft_ctrl & ~(1 << 14));
   16938 
   16939 out:
   16940 	sc->phy.release(sc);
   16941 
   16942 	return rv;
   16943 }
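
/*
 * The overall shape of the workaround above, reduced to a sketch: the PHY
 * Rx path is gated via I82579_DFT_CTRL bit 14 around all of the MAC, KMRN
 * and PHY reprogramming, then ungated at the end:
 */
#if 0
	sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
	sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl | (1 << 14));	/* disable the Rx path */
	/* ... reprogram FFLT_DBG, RCTL, KMRN and PHY jumbo settings ... */
	sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl & ~(1 << 14));	/* re-enable the Rx path */
#endif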
   16944 
   16945 /*
   16946  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16947  *  done after every PHY reset.
   16948  */
   16949 static int
   16950 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16951 {
   16952 	device_t dev = sc->sc_dev;
   16953 	int rv;
   16954 
   16955 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16956 		device_xname(dev), __func__));
   16957 	KASSERT(sc->sc_type == WM_T_PCH2);
   16958 
   16959 	/* Set MDIO slow mode before any other MDIO access */
   16960 	rv = wm_set_mdio_slow_mode_hv(sc);
   16961 	if (rv != 0)
   16962 		return rv;
   16963 
   16964 	rv = sc->phy.acquire(sc);
   16965 	if (rv != 0)
   16966 		return rv;
   16967 	/* Set MSE higher to enable link to stay up when noise is high */
   16968 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16969 	if (rv != 0)
   16970 		goto release;
   16971 	/* Drop link after 5 times MSE threshold was reached */
   16972 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16973 release:
   16974 	sc->phy.release(sc);
   16975 
   16976 	return rv;
   16977 }
   16978 
   16979 /**
   16980  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16981  *  @link: link up bool flag
   16982  *
    16983  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
    16984  *  indications, preventing further DMA write requests.  Work around the
    16985  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   16986  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16987  *  speeds in order to avoid Tx hangs.
   16988  **/
   16989 static int
   16990 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16991 {
   16992 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16993 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16994 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16995 	uint16_t phyreg;
   16996 
   16997 	if (link && (speed == STATUS_SPEED_1000)) {
   16998 		int rv;
   16999 
   17000 		rv = sc->phy.acquire(sc);
   17001 		if (rv != 0)
   17002 			return rv;
   17003 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17004 		    &phyreg);
   17005 		if (rv != 0)
   17006 			goto release;
   17007 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17008 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17009 		if (rv != 0)
   17010 			goto release;
   17011 		delay(20);
   17012 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17013 
   17014 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17015 		    &phyreg);
   17016 release:
   17017 		sc->phy.release(sc);
   17018 		return rv;
   17019 	}
   17020 
   17021 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17022 
   17023 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17024 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17025 	    || !link
   17026 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17027 		goto update_fextnvm6;
   17028 
   17029 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17030 
   17031 	/* Clear link status transmit timeout */
   17032 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17033 	if (speed == STATUS_SPEED_100) {
   17034 		/* Set inband Tx timeout to 5x10us for 100Half */
   17035 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17036 
   17037 		/* Do not extend the K1 entry latency for 100Half */
   17038 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17039 	} else {
   17040 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17041 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17042 
   17043 		/* Extend the K1 entry latency for 10 Mbps */
   17044 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17045 	}
   17046 
   17047 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17048 
   17049 update_fextnvm6:
   17050 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17051 	return 0;
   17052 }
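
/*
 * The inband link-status Tx timeout programmed above is in units of 10us,
 * per the comments (5 -> 50us for 100Half, 50 -> 500us for 10Mbps).  A
 * hypothetical helper, assuming that 10us unit:
 */
#if 0
static uint16_t
wm_inband_timeout_bits(unsigned int usec)
{

	return (usec / 10) << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
}
#endif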
   17053 
   17054 /*
   17055  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17056  *  @sc:   pointer to the HW structure
   17057  *  @link: link up bool flag
   17058  *
   17059  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17060  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17061  *  If link is down, the function will restore the default K1 setting located
   17062  *  in the NVM.
   17063  */
   17064 static int
   17065 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17066 {
   17067 	int k1_enable = sc->sc_nvm_k1_enabled;
   17068 	int rv;
   17069 
   17070 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17071 		device_xname(sc->sc_dev), __func__));
   17072 
   17073 	rv = sc->phy.acquire(sc);
   17074 	if (rv != 0)
   17075 		return rv;
   17076 
   17077 	if (link) {
   17078 		k1_enable = 0;
   17079 
   17080 		/* Link stall fix for link up */
   17081 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17082 		    0x0100);
   17083 	} else {
   17084 		/* Link stall fix for link down */
   17085 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17086 		    0x4100);
   17087 	}
   17088 
   17089 	wm_configure_k1_ich8lan(sc, k1_enable);
   17090 	sc->phy.release(sc);
   17091 
   17092 	return 0;
   17093 }
   17094 
   17095 /*
   17096  *  wm_k1_workaround_lv - K1 Si workaround
   17097  *  @sc:   pointer to the HW structure
   17098  *
    17099  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    17100  *  Disable K1 for 1000 and 100 speeds.
   17101  */
   17102 static int
   17103 wm_k1_workaround_lv(struct wm_softc *sc)
   17104 {
   17105 	uint32_t reg;
   17106 	uint16_t phyreg;
   17107 	int rv;
   17108 
   17109 	if (sc->sc_type != WM_T_PCH2)
   17110 		return 0;
   17111 
   17112 	/* Set K1 beacon duration based on 10Mbps speed */
   17113 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17114 	if (rv != 0)
   17115 		return rv;
   17116 
   17117 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17118 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17119 		if (phyreg &
   17120 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17121 			/* LV 1G/100 packet drop issue workaround */
   17122 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17123 			    &phyreg);
   17124 			if (rv != 0)
   17125 				return rv;
   17126 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17127 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17128 			    phyreg);
   17129 			if (rv != 0)
   17130 				return rv;
   17131 		} else {
   17132 			/* For 10Mbps */
   17133 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17134 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17135 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17136 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17137 		}
   17138 	}
   17139 
   17140 	return 0;
   17141 }
   17142 
   17143 /*
   17144  *  wm_link_stall_workaround_hv - Si workaround
   17145  *  @sc: pointer to the HW structure
   17146  *
   17147  *  This function works around a Si bug where the link partner can get
   17148  *  a link up indication before the PHY does. If small packets are sent
   17149  *  by the link partner they can be placed in the packet buffer without
    17150  *  being properly accounted for by the PHY and will stall, preventing
   17151  *  further packets from being received.  The workaround is to clear the
   17152  *  packet buffer after the PHY detects link up.
   17153  */
   17154 static int
   17155 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17156 {
   17157 	uint16_t phyreg;
   17158 
   17159 	if (sc->sc_phytype != WMPHY_82578)
   17160 		return 0;
   17161 
    17162 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17163 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17164 	if ((phyreg & BMCR_LOOP) != 0)
   17165 		return 0;
   17166 
   17167 	/* Check if link is up and at 1Gbps */
   17168 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17169 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17170 	    | BM_CS_STATUS_SPEED_MASK;
   17171 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17172 		| BM_CS_STATUS_SPEED_1000))
   17173 		return 0;
   17174 
   17175 	delay(200 * 1000);	/* XXX too big */
   17176 
   17177 	/* Flush the packets in the fifo buffer */
   17178 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17179 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17180 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17181 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17182 
   17183 	return 0;
   17184 }
   17185 
   17186 static int
   17187 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17188 {
   17189 	int rv;
   17190 
   17191 	rv = sc->phy.acquire(sc);
   17192 	if (rv != 0) {
   17193 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17194 		    __func__);
   17195 		return rv;
   17196 	}
   17197 
   17198 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17199 
   17200 	sc->phy.release(sc);
   17201 
   17202 	return rv;
   17203 }
   17204 
   17205 static int
   17206 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17207 {
   17208 	int rv;
   17209 	uint16_t reg;
   17210 
   17211 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17212 	if (rv != 0)
   17213 		return rv;
   17214 
   17215 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17216 	    reg | HV_KMRN_MDIO_SLOW);
   17217 }
   17218 
   17219 /*
   17220  *  wm_configure_k1_ich8lan - Configure K1 power state
   17221  *  @sc: pointer to the HW structure
   17222  *  @enable: K1 state to configure
   17223  *
   17224  *  Configure the K1 power state based on the provided parameter.
   17225  *  Assumes semaphore already acquired.
   17226  */
   17227 static void
   17228 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17229 {
   17230 	uint32_t ctrl, ctrl_ext, tmp;
   17231 	uint16_t kmreg;
   17232 	int rv;
   17233 
   17234 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17235 
   17236 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17237 	if (rv != 0)
   17238 		return;
   17239 
   17240 	if (k1_enable)
   17241 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17242 	else
   17243 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17244 
   17245 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17246 	if (rv != 0)
   17247 		return;
   17248 
   17249 	delay(20);
   17250 
   17251 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17252 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17253 
   17254 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17255 	tmp |= CTRL_FRCSPD;
   17256 
   17257 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17258 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17259 	CSR_WRITE_FLUSH(sc);
   17260 	delay(20);
   17261 
   17262 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17263 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17264 	CSR_WRITE_FLUSH(sc);
   17265 	delay(20);
   17266 
   17267 	return;
   17268 }
   17269 
   17270 /* special case - for 82575 - need to do manual init ... */
   17271 static void
   17272 wm_reset_init_script_82575(struct wm_softc *sc)
   17273 {
   17274 	/*
    17275 	 * Remark: this is untested code - we have no board without EEPROM.
    17276 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17277 	 */
   17278 
   17279 	/* SerDes configuration via SERDESCTRL */
   17280 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17281 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17282 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17283 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17284 
   17285 	/* CCM configuration via CCMCTL register */
   17286 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17287 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17288 
   17289 	/* PCIe lanes configuration */
   17290 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17291 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17292 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17293 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17294 
   17295 	/* PCIe PLL Configuration */
   17296 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17297 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17298 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17299 }
   17300 
   17301 static void
   17302 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17303 {
   17304 	uint32_t reg;
   17305 	uint16_t nvmword;
   17306 	int rv;
   17307 
   17308 	if (sc->sc_type != WM_T_82580)
   17309 		return;
   17310 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17311 		return;
   17312 
   17313 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17314 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17315 	if (rv != 0) {
   17316 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17317 		    __func__);
   17318 		return;
   17319 	}
   17320 
   17321 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17322 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17323 		reg |= MDICNFG_DEST;
   17324 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17325 		reg |= MDICNFG_COM_MDIO;
   17326 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17327 }
   17328 
   17329 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17330 
   17331 static bool
   17332 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17333 {
   17334 	uint32_t reg;
   17335 	uint16_t id1, id2;
   17336 	int i, rv;
   17337 
   17338 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17339 		device_xname(sc->sc_dev), __func__));
   17340 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17341 
   17342 	id1 = id2 = 0xffff;
   17343 	for (i = 0; i < 2; i++) {
   17344 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17345 		    &id1);
   17346 		if ((rv != 0) || MII_INVALIDID(id1))
   17347 			continue;
   17348 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17349 		    &id2);
   17350 		if ((rv != 0) || MII_INVALIDID(id2))
   17351 			continue;
   17352 		break;
   17353 	}
   17354 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17355 		goto out;
   17356 
   17357 	/*
   17358 	 * In case the PHY needs to be in mdio slow mode,
   17359 	 * set slow mode and try to get the PHY id again.
   17360 	 */
   17361 	rv = 0;
   17362 	if (sc->sc_type < WM_T_PCH_LPT) {
   17363 		wm_set_mdio_slow_mode_hv_locked(sc);
   17364 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17365 		    &id1);
   17366 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17367 		    &id2);
   17368 	}
   17369 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17370 		device_printf(sc->sc_dev, "XXX return with false\n");
   17371 		return false;
   17372 	}
   17373 out:
   17374 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17375 		/* Only unforce SMBus if ME is not active */
   17376 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17377 			uint16_t phyreg;
   17378 
   17379 			/* Unforce SMBus mode in PHY */
   17380 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17381 			    CV_SMB_CTRL, &phyreg);
   17382 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17383 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17384 			    CV_SMB_CTRL, phyreg);
   17385 
   17386 			/* Unforce SMBus mode in MAC */
   17387 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17388 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17389 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17390 		}
   17391 	}
   17392 	return true;
   17393 }
   17394 
   17395 static void
   17396 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17397 {
   17398 	uint32_t reg;
   17399 	int i;
   17400 
   17401 	/* Set PHY Config Counter to 50msec */
   17402 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17403 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17404 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17405 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17406 
   17407 	/* Toggle LANPHYPC */
   17408 	reg = CSR_READ(sc, WMREG_CTRL);
   17409 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17410 	reg &= ~CTRL_LANPHYPC_VALUE;
   17411 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17412 	CSR_WRITE_FLUSH(sc);
   17413 	delay(1000);
   17414 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17415 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17416 	CSR_WRITE_FLUSH(sc);
   17417 
   17418 	if (sc->sc_type < WM_T_PCH_LPT)
   17419 		delay(50 * 1000);
   17420 	else {
   17421 		i = 20;
   17422 
   17423 		do {
   17424 			delay(5 * 1000);
   17425 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17426 		    && i--);
   17427 
   17428 		delay(30 * 1000);
   17429 	}
   17430 }
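
/*
 * Aside: the sequence above drives LANPHYPC low under software control
 * (OVERRIDE set, VALUE clear) for ~1ms to power-cycle the PHY.  On LPT
 * and newer parts it then polls CTRL_EXT_LPCD for up to 20 * 5ms before
 * the final 30ms settle delay; older parts just wait a flat 50ms.
 */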
   17431 
   17432 static int
   17433 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17434 {
   17435 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17436 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17437 	uint32_t rxa;
   17438 	uint16_t scale = 0, lat_enc = 0;
   17439 	int32_t obff_hwm = 0;
   17440 	int64_t lat_ns, value;
   17441 
   17442 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17443 		device_xname(sc->sc_dev), __func__));
   17444 
   17445 	if (link) {
   17446 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17447 		uint32_t status;
   17448 		uint16_t speed;
   17449 		pcireg_t preg;
   17450 
   17451 		status = CSR_READ(sc, WMREG_STATUS);
   17452 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17453 		case STATUS_SPEED_10:
   17454 			speed = 10;
   17455 			break;
   17456 		case STATUS_SPEED_100:
   17457 			speed = 100;
   17458 			break;
   17459 		case STATUS_SPEED_1000:
   17460 			speed = 1000;
   17461 			break;
   17462 		default:
   17463 			device_printf(sc->sc_dev, "Unknown speed "
   17464 			    "(status = %08x)\n", status);
   17465 			return -1;
   17466 		}
   17467 
   17468 		/* Rx Packet Buffer Allocation size (KB) */
   17469 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17470 
   17471 		/*
   17472 		 * Determine the maximum latency tolerated by the device.
   17473 		 *
   17474 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17475 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17476 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17477 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17478 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17479 		 */
   17480 		lat_ns = ((int64_t)rxa * 1024 -
   17481 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17482 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17483 		if (lat_ns < 0)
   17484 			lat_ns = 0;
   17485 		else
   17486 			lat_ns /= speed;
   17487 		value = lat_ns;
   17488 
   17489 		while (value > LTRV_VALUE) {
    17490 			scale++;
   17491 			value = howmany(value, __BIT(5));
   17492 		}
   17493 		if (scale > LTRV_SCALE_MAX) {
   17494 			device_printf(sc->sc_dev,
   17495 			    "Invalid LTR latency scale %d\n", scale);
   17496 			return -1;
   17497 		}
   17498 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17499 
   17500 		/* Determine the maximum latency tolerated by the platform */
   17501 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17502 		    WM_PCI_LTR_CAP_LPT);
   17503 		max_snoop = preg & 0xffff;
   17504 		max_nosnoop = preg >> 16;
   17505 
   17506 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17507 
   17508 		if (lat_enc > max_ltr_enc) {
   17509 			lat_enc = max_ltr_enc;
   17510 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17511 			    * PCI_LTR_SCALETONS(
   17512 				    __SHIFTOUT(lat_enc,
   17513 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17514 		}
   17515 
   17516 		if (lat_ns) {
   17517 			lat_ns *= speed * 1000;
   17518 			lat_ns /= 8;
   17519 			lat_ns /= 1000000000;
   17520 			obff_hwm = (int32_t)(rxa - lat_ns);
   17521 		}
   17522 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17523 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    17524 			    " (rxa = %d, lat_ns = %d)\n",
   17525 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17526 			return -1;
   17527 		}
   17528 	}
   17529 	/* Snoop and No-Snoop latencies the same */
   17530 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17531 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17532 
   17533 	/* Set OBFF high water mark */
   17534 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17535 	reg |= obff_hwm;
   17536 	CSR_WRITE(sc, WMREG_SVT, reg);
   17537 
   17538 	/* Enable OBFF */
   17539 	reg = CSR_READ(sc, WMREG_SVCR);
   17540 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17541 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17542 
   17543 	return 0;
   17544 }
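
/*
 * Worked example of the LTR encoding loop above: the encoded latency is
 * value * 2^(5 * scale) ns, with value limited to 10 bits (0-1023).  For
 * lat_ns = 100000: 100000 > 1023, so scale becomes 1 and value
 * howmany(100000, 32) = 3125; still > 1023, so scale becomes 2 and value
 * howmany(3125, 32) = 98.  The encoded latency 98 * 2^10 = 100352 ns is
 * a representable value >= the requested one.
 */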
   17545 
   17546 /*
   17547  * I210 Errata 25 and I211 Errata 10
   17548  * Slow System Clock.
   17549  *
    17550  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   17551  */
   17552 static int
   17553 wm_pll_workaround_i210(struct wm_softc *sc)
   17554 {
   17555 	uint32_t mdicnfg, wuc;
   17556 	uint32_t reg;
   17557 	pcireg_t pcireg;
   17558 	uint32_t pmreg;
   17559 	uint16_t nvmword, tmp_nvmword;
   17560 	uint16_t phyval;
   17561 	bool wa_done = false;
   17562 	int i, rv = 0;
   17563 
   17564 	/* Get Power Management cap offset */
   17565 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17566 	    &pmreg, NULL) == 0)
   17567 		return -1;
   17568 
   17569 	/* Save WUC and MDICNFG registers */
   17570 	wuc = CSR_READ(sc, WMREG_WUC);
   17571 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17572 
   17573 	reg = mdicnfg & ~MDICNFG_DEST;
   17574 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17575 
   17576 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17577 		/*
   17578 		 * The default value of the Initialization Control Word 1
    17579 		 * is the same for both the I210's FLASH_HW and the I21[01]'s iNVM.
   17580 		 */
   17581 		nvmword = INVM_DEFAULT_AL;
   17582 	}
   17583 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17584 
   17585 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17586 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17587 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17588 
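          		/*
          		 * If the PLL-unconfigured pattern is absent, the PHY
          		 * PLL locked correctly and no workaround is needed
          		 * (inferred from the GS40G_PHY_PLL_UNCONF name).
          		 */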
   17589 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17590 			rv = 0;
   17591 			break; /* OK */
   17592 		} else
   17593 			rv = -1;
   17594 
   17595 		wa_done = true;
   17596 		/* Directly reset the internal PHY */
   17597 		reg = CSR_READ(sc, WMREG_CTRL);
   17598 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17599 
   17600 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17601 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17602 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17603 
   17604 		CSR_WRITE(sc, WMREG_WUC, 0);
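          		/*
          		 * Temporarily override the autoload word with the
          		 * PLL workaround value.
          		 */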
   17605 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17606 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17607 
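          		/*
          		 * Bounce through D3hot and back to D0; this appears
          		 * to retrigger the NVM autoload so the PLL comes up
          		 * with the overridden word.
          		 */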
   17608 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17609 		    pmreg + PCI_PMCSR);
   17610 		pcireg |= PCI_PMCSR_STATE_D3;
   17611 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17612 		    pmreg + PCI_PMCSR, pcireg);
   17613 		delay(1000);
   17614 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17615 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17616 		    pmreg + PCI_PMCSR, pcireg);
   17617 
   17618 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17619 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17620 
   17621 		/* Restore WUC register */
   17622 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17623 	}
   17624 
   17625 	/* Restore MDICNFG setting */
   17626 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17627 	if (wa_done)
   17628 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17629 	return rv;
   17630 }
   17631 
   17632 static void
   17633 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17634 {
   17635 	uint32_t reg;
   17636 
   17637 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17638 		device_xname(sc->sc_dev), __func__));
   17639 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17640 	    || (sc->sc_type == WM_T_PCH_CNP));
   17641 
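          	/*
          	 * Ungating the side clock and disabling IOSF sideband
          	 * clock gating/requests presumably keeps the interrupt
          	 * path clocked for legacy INTx; rationale inferred from
          	 * the FEXTNVM7/FEXTNVM9 bit names.
          	 */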
   17642 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17643 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17644 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17645 
   17646 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17647 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17648 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17649 }
   17650 
   17651 /* Sysctl functions */
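          /*
           * Each read-only handler below snapshots a hardware register
           * into a stack variable and points the node's sysctl_data at
           * it, so sysctl_lookup() copies out a value read at query
           * time.
           */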
   17652 static int
   17653 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17654 {
   17655 	struct sysctlnode node = *rnode;
   17656 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17657 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17658 	struct wm_softc *sc = txq->txq_sc;
   17659 	uint32_t reg;
   17660 
   17661 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17662 	node.sysctl_data = &reg;
   17663 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17664 }
   17665 
   17666 static int
   17667 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17668 {
   17669 	struct sysctlnode node = *rnode;
   17670 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17671 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17672 	struct wm_softc *sc = txq->txq_sc;
   17673 	uint32_t reg;
   17674 
   17675 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17676 	node.sysctl_data = &reg;
   17677 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17678 }
   17679 
   17680 #ifdef WM_DEBUG
   17681 static int
   17682 wm_sysctl_debug(SYSCTLFN_ARGS)
   17683 {
   17684 	struct sysctlnode node = *rnode;
   17685 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17686 	uint32_t dflags;
   17687 	int error;
   17688 
   17689 	dflags = sc->sc_debug;
   17690 	node.sysctl_data = &dflags;
   17691 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17692 
   17693 	if (error || newp == NULL)
   17694 		return error;
   17695 
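          	/*
          	 * Commit the new debug flags and dump a couple of Tx
          	 * registers as a quick sanity check.
          	 */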
   17696 	sc->sc_debug = dflags;
   17697 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17698 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17699 
   17700 	return 0;
   17701 }
   17702 #endif
   17703