/*	$NetBSD: if_wm.c,v 1.781 2023/05/11 07:47:14 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.781 2023/05/11 07:47:14 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
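
/*
 * Illustrative usage (a sketch, not from the original source): with
 * WM_DEBUG defined, a call site looks like
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 * The printf argument list must carry its own parentheses because the
 * macro expands "y" directly after "printf".
 */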

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
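
/*
 * Illustrative note: because txq_num and txq_ndesc are powers of two, the
 * "& mask" arithmetic above wraps ring indices cheaply, e.g. with
 * WM_NTXDESC(txq) == 256, WM_NEXTTX(txq, 255) == (255 + 1) & 0xff == 0.
 */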

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
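
/*
 * Illustrative note: WM_CDTXOFF()/WM_CDRXOFF() yield byte offsets into the
 * descriptor ring; e.g. with a 16-byte legacy Tx descriptor,
 * WM_CDTXOFF(txq, 3) == 48.
 */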

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
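
/*
 * Illustrative example: WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, 0, xname)
 * formats the counter name "txq00defrag" into the per-queue name buffer
 * and attaches it to txq->txq_ev_defrag as an EVCNT_TYPE_MISC counter.
 */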

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* Tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* Rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};
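
/*
 * Illustrative usage of the ops vector above (a sketch, not verbatim from
 * this file): the PHY semaphore must be held around the _locked accessors.
 *
 *	uint16_t val;
 *	if (sc->phy.acquire(sc) != 0)
 *		return;
 *	sc->phy.readreg_locked(sc->sc_dev, 1, MII_BMCR, &val);
 *	sc->phy.release(sc);
 */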

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared between the "Intr. cause" and
	 * non-"Intr. cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
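
/*
 * Illustrative note: WM_RXCHAIN_RESET() points rxq_tailp back at rxq_head
 * (leaving an empty chain), and WM_RXCHAIN_LINK() appends an mbuf in O(1)
 * by storing through rxq_tailp and advancing it to &m->m_next.
 */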

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif
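
/*
 * Illustrative note (an assumption, not stated here): the load+store pair
 * above is not an atomic increment; it only avoids torn 64-bit accesses.
 * That is sufficient when each counter is updated from a single context,
 * e.g. under its queue lock, while readers may sample it concurrently.
 */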

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)		\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_STORE(ev, val)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_STORE(qname, evname, val)	/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
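
/*
 * Illustrative note: CSR_WRITE_FLUSH() reads the (side-effect free) STATUS
 * register to force any posted PCI writes out to the device, e.g.
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 */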

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
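
/*
 * Illustrative note: descriptor base addresses are programmed as two 32-bit
 * halves; the sizeof(bus_addr_t) == 8 test makes the high half compile to 0
 * on 32-bit platforms instead of shifting a 32-bit value by 32.
 */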

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran-related functions (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1297 	  "Intel i82541EI 1000BASE-T Ethernet",
   1298 	  WM_T_82541,		WMP_F_COPPER },
   1299 
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1301 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1302 	  WM_T_82541,		WMP_F_COPPER },
   1303 
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1305 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1306 	  WM_T_82541,		WMP_F_COPPER },
   1307 
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1309 	  "Intel i82541ER 1000BASE-T Ethernet",
   1310 	  WM_T_82541_2,		WMP_F_COPPER },
   1311 
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1313 	  "Intel i82541GI 1000BASE-T Ethernet",
   1314 	  WM_T_82541_2,		WMP_F_COPPER },
   1315 
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1317 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1318 	  WM_T_82541_2,		WMP_F_COPPER },
   1319 
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1321 	  "Intel i82541PI 1000BASE-T Ethernet",
   1322 	  WM_T_82541_2,		WMP_F_COPPER },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1325 	  "Intel i82547EI 1000BASE-T Ethernet",
   1326 	  WM_T_82547,		WMP_F_COPPER },
   1327 
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1329 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1330 	  WM_T_82547,		WMP_F_COPPER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1333 	  "Intel i82547GI 1000BASE-T Ethernet",
   1334 	  WM_T_82547_2,		WMP_F_COPPER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1337 	  "Intel PRO/1000 PT (82571EB)",
   1338 	  WM_T_82571,		WMP_F_COPPER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1341 	  "Intel PRO/1000 PF (82571EB)",
   1342 	  WM_T_82571,		WMP_F_FIBER },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1345 	  "Intel PRO/1000 PB (82571EB)",
   1346 	  WM_T_82571,		WMP_F_SERDES },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1349 	  "Intel PRO/1000 QT (82571EB)",
   1350 	  WM_T_82571,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1353 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1354 	  WM_T_82571,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1357 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1358 	  WM_T_82571,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1361 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1362 	  WM_T_82571,		WMP_F_SERDES },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1365 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82571,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1369 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1370 	  WM_T_82571,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1373 	  "Intel i82572EI 1000baseT Ethernet",
   1374 	  WM_T_82572,		WMP_F_COPPER },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1377 	  "Intel i82572EI 1000baseX Ethernet",
   1378 	  WM_T_82572,		WMP_F_FIBER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1381 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1382 	  WM_T_82572,		WMP_F_SERDES },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1385 	  "Intel i82572EI 1000baseT Ethernet",
   1386 	  WM_T_82572,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1389 	  "Intel i82573E",
   1390 	  WM_T_82573,		WMP_F_COPPER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1393 	  "Intel i82573E IAMT",
   1394 	  WM_T_82573,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1397 	  "Intel i82573L Gigabit Ethernet",
   1398 	  WM_T_82573,		WMP_F_COPPER },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1401 	  "Intel i82574L",
   1402 	  WM_T_82574,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1405 	  "Intel i82574L",
   1406 	  WM_T_82574,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1409 	  "Intel i82583V",
   1410 	  WM_T_82583,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1413 	  "i80003 dual 1000baseT Ethernet",
   1414 	  WM_T_80003,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1417 	  "i80003 dual 1000baseX Ethernet",
   1418 	  WM_T_80003,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1421 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1422 	  WM_T_80003,		WMP_F_SERDES },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1425 	  "Intel i80003 1000baseT Ethernet",
   1426 	  WM_T_80003,		WMP_F_COPPER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1429 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_80003,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1433 	  "Intel i82801H (M_AMT) LAN Controller",
   1434 	  WM_T_ICH8,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1436 	  "Intel i82801H (AMT) LAN Controller",
   1437 	  WM_T_ICH8,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1439 	  "Intel i82801H LAN Controller",
   1440 	  WM_T_ICH8,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1442 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1443 	  WM_T_ICH8,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1445 	  "Intel i82801H (M) LAN Controller",
   1446 	  WM_T_ICH8,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1448 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1449 	  WM_T_ICH8,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1451 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1452 	  WM_T_ICH8,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1454 	  "82567V-3 LAN Controller",
   1455 	  WM_T_ICH8,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1457 	  "82801I (AMT) LAN Controller",
   1458 	  WM_T_ICH9,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1460 	  "82801I 10/100 LAN Controller",
   1461 	  WM_T_ICH9,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1463 	  "82801I (G) 10/100 LAN Controller",
   1464 	  WM_T_ICH9,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1466 	  "82801I (GT) 10/100 LAN Controller",
   1467 	  WM_T_ICH9,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1469 	  "82801I (C) LAN Controller",
   1470 	  WM_T_ICH9,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1472 	  "82801I mobile LAN Controller",
   1473 	  WM_T_ICH9,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1475 	  "82801I mobile (V) LAN Controller",
   1476 	  WM_T_ICH9,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1478 	  "82801I mobile (AMT) LAN Controller",
   1479 	  WM_T_ICH9,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1481 	  "82567LM-4 LAN Controller",
   1482 	  WM_T_ICH9,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1484 	  "82567LM-2 LAN Controller",
   1485 	  WM_T_ICH10,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1487 	  "82567LF-2 LAN Controller",
   1488 	  WM_T_ICH10,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1490 	  "82567LM-3 LAN Controller",
   1491 	  WM_T_ICH10,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1493 	  "82567LF-3 LAN Controller",
   1494 	  WM_T_ICH10,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1496 	  "82567V-2 LAN Controller",
   1497 	  WM_T_ICH10,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1499 	  "82567V-3? LAN Controller",
   1500 	  WM_T_ICH10,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1502 	  "HANKSVILLE LAN Controller",
   1503 	  WM_T_ICH10,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1505 	  "PCH LAN (82577LM) Controller",
   1506 	  WM_T_PCH,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1508 	  "PCH LAN (82577LC) Controller",
   1509 	  WM_T_PCH,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1511 	  "PCH LAN (82578DM) Controller",
   1512 	  WM_T_PCH,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1514 	  "PCH LAN (82578DC) Controller",
   1515 	  WM_T_PCH,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1517 	  "PCH2 LAN (82579LM) Controller",
   1518 	  WM_T_PCH2,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1520 	  "PCH2 LAN (82579V) Controller",
   1521 	  WM_T_PCH2,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1523 	  "82575EB dual-1000baseT Ethernet",
   1524 	  WM_T_82575,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1526 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1527 	  WM_T_82575,		WMP_F_SERDES },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1529 	  "82575GB quad-1000baseT Ethernet",
   1530 	  WM_T_82575,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1532 	  "82575GB quad-1000baseT Ethernet (PM)",
   1533 	  WM_T_82575,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1535 	  "82576 1000BaseT Ethernet",
   1536 	  WM_T_82576,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1538 	  "82576 1000BaseX Ethernet",
   1539 	  WM_T_82576,		WMP_F_FIBER },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1542 	  "82576 gigabit Ethernet (SERDES)",
   1543 	  WM_T_82576,		WMP_F_SERDES },
   1544 
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1546 	  "82576 quad-1000BaseT Ethernet",
   1547 	  WM_T_82576,		WMP_F_COPPER },
   1548 
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1550 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1551 	  WM_T_82576,		WMP_F_COPPER },
   1552 
   1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1554 	  "82576 gigabit Ethernet",
   1555 	  WM_T_82576,		WMP_F_COPPER },
   1556 
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1558 	  "82576 gigabit Ethernet (SERDES)",
   1559 	  WM_T_82576,		WMP_F_SERDES },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1561 	  "82576 quad-gigabit Ethernet (SERDES)",
   1562 	  WM_T_82576,		WMP_F_SERDES },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1565 	  "82580 1000BaseT Ethernet",
   1566 	  WM_T_82580,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1568 	  "82580 1000BaseX Ethernet",
   1569 	  WM_T_82580,		WMP_F_FIBER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1572 	  "82580 1000BaseT Ethernet (SERDES)",
   1573 	  WM_T_82580,		WMP_F_SERDES },
   1574 
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1576 	  "82580 gigabit Ethernet (SGMII)",
   1577 	  WM_T_82580,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1579 	  "82580 dual-1000BaseT Ethernet",
   1580 	  WM_T_82580,		WMP_F_COPPER },
   1581 
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1583 	  "82580 quad-1000BaseX Ethernet",
   1584 	  WM_T_82580,		WMP_F_FIBER },
   1585 
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1587 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1588 	  WM_T_82580,		WMP_F_COPPER },
   1589 
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1591 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1592 	  WM_T_82580,		WMP_F_SERDES },
   1593 
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1595 	  "DH89XXCC 1000BASE-KX Ethernet",
   1596 	  WM_T_82580,		WMP_F_SERDES },
   1597 
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1599 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1600 	  WM_T_82580,		WMP_F_SERDES },
   1601 
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1603 	  "I350 Gigabit Network Connection",
   1604 	  WM_T_I350,		WMP_F_COPPER },
   1605 
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1607 	  "I350 Gigabit Fiber Network Connection",
   1608 	  WM_T_I350,		WMP_F_FIBER },
   1609 
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1611 	  "I350 Gigabit Backplane Connection",
   1612 	  WM_T_I350,		WMP_F_SERDES },
   1613 
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1615 	  "I350 Quad Port Gigabit Ethernet",
   1616 	  WM_T_I350,		WMP_F_SERDES },
   1617 
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1619 	  "I350 Gigabit Connection",
   1620 	  WM_T_I350,		WMP_F_COPPER },
   1621 
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1623 	  "I354 Gigabit Ethernet (KX)",
   1624 	  WM_T_I354,		WMP_F_SERDES },
   1625 
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1627 	  "I354 Gigabit Ethernet (SGMII)",
   1628 	  WM_T_I354,		WMP_F_COPPER },
   1629 
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1631 	  "I354 Gigabit Ethernet (2.5G)",
   1632 	  WM_T_I354,		WMP_F_COPPER },
   1633 
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1635 	  "I210-T1 Ethernet Server Adapter",
   1636 	  WM_T_I210,		WMP_F_COPPER },
   1637 
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1639 	  "I210 Ethernet (Copper OEM)",
   1640 	  WM_T_I210,		WMP_F_COPPER },
   1641 
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1643 	  "I210 Ethernet (Copper IT)",
   1644 	  WM_T_I210,		WMP_F_COPPER },
   1645 
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1647 	  "I210 Ethernet (Copper, FLASH less)",
   1648 	  WM_T_I210,		WMP_F_COPPER },
   1649 
   1650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1651 	  "I210 Gigabit Ethernet (Fiber)",
   1652 	  WM_T_I210,		WMP_F_FIBER },
   1653 
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1655 	  "I210 Gigabit Ethernet (SERDES)",
   1656 	  WM_T_I210,		WMP_F_SERDES },
   1657 
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1659 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1660 	  WM_T_I210,		WMP_F_SERDES },
   1661 
   1662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1663 	  "I210 Gigabit Ethernet (SGMII)",
   1664 	  WM_T_I210,		WMP_F_COPPER },
   1665 
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1667 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1668 	  WM_T_I210,		WMP_F_COPPER },
   1669 
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1671 	  "I211 Ethernet (COPPER)",
   1672 	  WM_T_I211,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1674 	  "I217 V Ethernet Connection",
   1675 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1677 	  "I217 LM Ethernet Connection",
   1678 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1680 	  "I218 V Ethernet Connection",
   1681 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1683 	  "I218 V Ethernet Connection",
   1684 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1686 	  "I218 V Ethernet Connection",
   1687 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1689 	  "I218 LM Ethernet Connection",
   1690 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1692 	  "I218 LM Ethernet Connection",
   1693 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1695 	  "I218 LM Ethernet Connection",
   1696 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1698 	  "I219 LM Ethernet Connection",
   1699 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1701 	  "I219 LM (2) Ethernet Connection",
   1702 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1704 	  "I219 LM (3) Ethernet Connection",
   1705 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1707 	  "I219 LM (4) Ethernet Connection",
   1708 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1710 	  "I219 LM (5) Ethernet Connection",
   1711 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1713 	  "I219 LM (6) Ethernet Connection",
   1714 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1716 	  "I219 LM (7) Ethernet Connection",
   1717 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1719 	  "I219 LM (8) Ethernet Connection",
   1720 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1722 	  "I219 LM (9) Ethernet Connection",
   1723 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1725 	  "I219 LM (10) Ethernet Connection",
   1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1728 	  "I219 LM (11) Ethernet Connection",
   1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1731 	  "I219 LM (12) Ethernet Connection",
   1732 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1734 	  "I219 LM (13) Ethernet Connection",
   1735 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1737 	  "I219 LM (14) Ethernet Connection",
   1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1740 	  "I219 LM (15) Ethernet Connection",
   1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1743 	  "I219 LM (16) Ethernet Connection",
   1744 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1746 	  "I219 LM (17) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1749 	  "I219 LM (18) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1752 	  "I219 LM (19) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1755 	  "I219 V Ethernet Connection",
   1756 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1758 	  "I219 V (2) Ethernet Connection",
   1759 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1761 	  "I219 V (4) Ethernet Connection",
   1762 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1764 	  "I219 V (5) Ethernet Connection",
   1765 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1767 	  "I219 V (6) Ethernet Connection",
   1768 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1770 	  "I219 V (7) Ethernet Connection",
   1771 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1773 	  "I219 V (8) Ethernet Connection",
   1774 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1775 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1776 	  "I219 V (9) Ethernet Connection",
   1777 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1779 	  "I219 V (10) Ethernet Connection",
   1780 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1782 	  "I219 V (11) Ethernet Connection",
   1783 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1785 	  "I219 V (12) Ethernet Connection",
   1786 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1787 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1788 	  "I219 V (13) Ethernet Connection",
   1789 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1791 	  "I219 V (14) Ethernet Connection",
   1792 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1794 	  "I219 V (15) Ethernet Connection",
   1795 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1797 	  "I219 V (16) Ethernet Connection",
   1798 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1799 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1800 	  "I219 V (17) Ethernet Connection",
   1801 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1803 	  "I219 V (18) Ethernet Connection",
   1804 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1805 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1806 	  "I219 V (19) Ethernet Connection",
   1807 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1808 	{ 0,			0,
   1809 	  NULL,
   1810 	  0,			0 },
   1811 };
   1812 
   1813 /*
   1814  * Register read/write functions.
   1815  * Other than CSR_{READ|WRITE}().
   1816  */
   1817 
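         /*
          * The i8254x I/O BAR implements a small indirect window: the
          * register offset is written at BAR offset 0 and the register
          * value is then read or written at BAR offset 4, which is all
          * that wm_io_read() and wm_io_write() below do.
          */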
   1818 #if 0 /* Not currently used */
   1819 static inline uint32_t
   1820 wm_io_read(struct wm_softc *sc, int reg)
   1821 {
   1822 
   1823 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1824 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1825 }
   1826 #endif
   1827 
   1828 static inline void
   1829 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1830 {
   1831 
   1832 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1833 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1834 }
   1835 
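         /*
          * Write an 8-bit value to an 82575 controller register that is
          * reached through an address/data control register: the data
          * and the target offset are combined into a single register
          * write, then the READY bit is polled (5us per iteration, up
          * to SCTL_CTL_POLL_TIMEOUT iterations) before giving up.
          */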
   1836 static inline void
   1837 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1838     uint32_t data)
   1839 {
   1840 	uint32_t regval;
   1841 	int i;
   1842 
   1843 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1844 
   1845 	CSR_WRITE(sc, reg, regval);
   1846 
   1847 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1848 		delay(5);
   1849 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1850 			break;
   1851 	}
   1852 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1853 		aprint_error("%s: WARNING:"
   1854 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1855 		    device_xname(sc->sc_dev), reg);
   1856 	}
   1857 }
   1858 
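         /*
          * Store a DMA address into the low/high 32-bit halves of a
          * legacy (wiseman) descriptor address field, little-endian.
          */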
   1859 static inline void
   1860 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1861 {
   1862 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1863 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1864 }
   1865 
   1866 /*
   1867  * Descriptor sync/init functions.
   1868  */
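         /*
          * wm_cdtxsync() syncs "num" Tx descriptors starting at "start",
          * splitting the bus_dmamap_sync() call in two when the range
          * wraps.  For example, on a 256-descriptor ring, start=250 and
          * num=10 sync descriptors 250-255 first and then 0-3.
          */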
   1869 static inline void
   1870 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1871 {
   1872 	struct wm_softc *sc = txq->txq_sc;
   1873 
   1874 	/* If it will wrap around, sync to the end of the ring. */
   1875 	if ((start + num) > WM_NTXDESC(txq)) {
   1876 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1877 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1878 		    (WM_NTXDESC(txq) - start), ops);
   1879 		num -= (WM_NTXDESC(txq) - start);
   1880 		start = 0;
   1881 	}
   1882 
   1883 	/* Now sync whatever is left. */
   1884 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1885 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1886 }
   1887 
   1888 static inline void
   1889 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1890 {
   1891 	struct wm_softc *sc = rxq->rxq_sc;
   1892 
   1893 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1894 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1895 }
   1896 
   1897 static inline void
   1898 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1899 {
   1900 	struct wm_softc *sc = rxq->rxq_sc;
   1901 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1902 	struct mbuf *m = rxs->rxs_mbuf;
   1903 
    1904 	/*
    1905 	 * Note: We scoot the packet forward 2 bytes in the buffer
    1906 	 * so that the payload after the Ethernet header is aligned
    1907 	 * to a 4-byte boundary (the IP header then starts at offset 16).
    1908 	 *
    1909 	 * XXX BRAINDAMAGE ALERT!
    1910 	 * The stupid chip uses the same size for every buffer, which
    1911 	 * is set in the Receive Control register.  We are using the 2K
    1912 	 * size option, but what we REALLY want is (2K - 2)!  For this
    1913 	 * reason, we can't "scoot" packets longer than the standard
    1914 	 * Ethernet MTU.  On strict-alignment platforms, if the total
    1915 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
    1916 	 * the upper layer copy the headers.
    1917 	 */
   1918 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1919 
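         	/*
         	 * Three receive descriptor layouts are used below: the 82574
         	 * extended format, the "new queue" format (82575 and newer)
         	 * and the legacy wiseman format for everything else.
         	 */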
   1920 	if (sc->sc_type == WM_T_82574) {
   1921 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1922 		rxd->erx_data.erxd_addr =
   1923 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1924 		rxd->erx_data.erxd_dd = 0;
   1925 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1926 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1927 
   1928 		rxd->nqrx_data.nrxd_paddr =
   1929 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1930 		/* Currently, split header is not supported. */
   1931 		rxd->nqrx_data.nrxd_haddr = 0;
   1932 	} else {
   1933 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1934 
   1935 		wm_set_dma_addr(&rxd->wrx_addr,
   1936 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1937 		rxd->wrx_len = 0;
   1938 		rxd->wrx_cksum = 0;
   1939 		rxd->wrx_status = 0;
   1940 		rxd->wrx_errors = 0;
   1941 		rxd->wrx_special = 0;
   1942 	}
   1943 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1944 
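         	/* Hand the descriptor to the hardware by writing the ring tail. */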
   1945 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1946 }
   1947 
   1948 /*
   1949  * Device driver interface functions and commonly used functions.
   1950  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1951  */
   1952 
   1953 /* Lookup supported device table */
   1954 static const struct wm_product *
   1955 wm_lookup(const struct pci_attach_args *pa)
   1956 {
   1957 	const struct wm_product *wmp;
   1958 
   1959 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1960 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1961 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1962 			return wmp;
   1963 	}
   1964 	return NULL;
   1965 }
   1966 
   1967 /* The match function (ca_match) */
   1968 static int
   1969 wm_match(device_t parent, cfdata_t cf, void *aux)
   1970 {
   1971 	struct pci_attach_args *pa = aux;
   1972 
   1973 	if (wm_lookup(pa) != NULL)
   1974 		return 1;
   1975 
   1976 	return 0;
   1977 }
   1978 
   1979 /* The attach function (ca_attach) */
   1980 static void
   1981 wm_attach(device_t parent, device_t self, void *aux)
   1982 {
   1983 	struct wm_softc *sc = device_private(self);
   1984 	struct pci_attach_args *pa = aux;
   1985 	prop_dictionary_t dict;
   1986 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1987 	pci_chipset_tag_t pc = pa->pa_pc;
   1988 	int counts[PCI_INTR_TYPE_SIZE];
   1989 	pci_intr_type_t max_type;
   1990 	const char *eetype, *xname;
   1991 	bus_space_tag_t memt;
   1992 	bus_space_handle_t memh;
   1993 	bus_size_t memsize;
   1994 	int memh_valid;
   1995 	int i, error;
   1996 	const struct wm_product *wmp;
   1997 	prop_data_t ea;
   1998 	prop_number_t pn;
   1999 	uint8_t enaddr[ETHER_ADDR_LEN];
   2000 	char buf[256];
   2001 	char wqname[MAXCOMLEN];
   2002 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2003 	pcireg_t preg, memtype;
   2004 	uint16_t eeprom_data, apme_mask;
   2005 	bool force_clear_smbi;
   2006 	uint32_t link_mode;
   2007 	uint32_t reg;
   2008 
   2009 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2010 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2011 #endif
   2012 	sc->sc_dev = self;
   2013 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2014 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2015 	sc->sc_core_stopping = false;
   2016 
   2017 	wmp = wm_lookup(pa);
   2018 #ifdef DIAGNOSTIC
   2019 	if (wmp == NULL) {
   2020 		printf("\n");
   2021 		panic("wm_attach: impossible");
   2022 	}
   2023 #endif
   2024 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2025 
   2026 	sc->sc_pc = pa->pa_pc;
   2027 	sc->sc_pcitag = pa->pa_tag;
   2028 
   2029 	if (pci_dma64_available(pa)) {
   2030 		aprint_verbose(", 64-bit DMA");
   2031 		sc->sc_dmat = pa->pa_dmat64;
   2032 	} else {
   2033 		aprint_verbose(", 32-bit DMA");
   2034 		sc->sc_dmat = pa->pa_dmat;
   2035 	}
   2036 
   2037 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2038 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2039 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2040 
   2041 	sc->sc_type = wmp->wmp_type;
   2042 
   2043 	/* Set default function pointers */
   2044 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2045 	sc->phy.release = sc->nvm.release = wm_put_null;
   2046 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2047 
   2048 	if (sc->sc_type < WM_T_82543) {
   2049 		if (sc->sc_rev < 2) {
   2050 			aprint_error_dev(sc->sc_dev,
   2051 			    "i82542 must be at least rev. 2\n");
   2052 			return;
   2053 		}
   2054 		if (sc->sc_rev < 3)
   2055 			sc->sc_type = WM_T_82542_2_0;
   2056 	}
   2057 
   2058 	/*
   2059 	 * Disable MSI for Errata:
   2060 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2061 	 *
   2062 	 *  82544: Errata 25
   2063 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2064 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2065 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2066 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2067 	 *
   2068 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2069 	 *
   2070 	 *  82571 & 82572: Errata 63
   2071 	 */
   2072 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2073 	    || (sc->sc_type == WM_T_82572))
   2074 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2075 
   2076 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2077 	    || (sc->sc_type == WM_T_82580)
   2078 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2079 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2080 		sc->sc_flags |= WM_F_NEWQUEUE;
   2081 
   2082 	/* Set device properties (mactype) */
   2083 	dict = device_properties(sc->sc_dev);
   2084 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2085 
   2086 	/*
    2087 	 * Map the device.  All devices support memory-mapped access,
   2088 	 * and it is really required for normal operation.
   2089 	 */
   2090 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2091 	switch (memtype) {
   2092 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2093 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2094 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2095 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2096 		break;
   2097 	default:
   2098 		memh_valid = 0;
   2099 		break;
   2100 	}
   2101 
   2102 	if (memh_valid) {
   2103 		sc->sc_st = memt;
   2104 		sc->sc_sh = memh;
   2105 		sc->sc_ss = memsize;
   2106 	} else {
   2107 		aprint_error_dev(sc->sc_dev,
   2108 		    "unable to map device registers\n");
   2109 		return;
   2110 	}
   2111 
   2112 	/*
   2113 	 * In addition, i82544 and later support I/O mapped indirect
   2114 	 * register access.  It is not desirable (nor supported in
   2115 	 * this driver) to use it for normal operation, though it is
   2116 	 * required to work around bugs in some chip versions.
   2117 	 */
   2118 	switch (sc->sc_type) {
   2119 	case WM_T_82544:
   2120 	case WM_T_82541:
   2121 	case WM_T_82541_2:
   2122 	case WM_T_82547:
   2123 	case WM_T_82547_2:
   2124 		/* First we have to find the I/O BAR. */
   2125 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2126 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2127 			if (memtype == PCI_MAPREG_TYPE_IO)
   2128 				break;
   2129 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2130 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2131 				i += 4;	/* skip high bits, too */
   2132 		}
   2133 		if (i < PCI_MAPREG_END) {
    2134 			/*
    2135 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2136 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO,
    2137 			 * which is no problem because newer chips don't
    2138 			 * have this bug.
    2139 			 *
    2140 			 * The i8254x apparently doesn't respond when the
    2141 			 * I/O BAR is 0, which suggests that it has not
    2142 			 * been configured.
    2143 			 */
   2144 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2145 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2146 				aprint_error_dev(sc->sc_dev,
   2147 				    "WARNING: I/O BAR at zero.\n");
   2148 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2149 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2150 			    == 0) {
   2151 				sc->sc_flags |= WM_F_IOH_VALID;
   2152 			} else
   2153 				aprint_error_dev(sc->sc_dev,
   2154 				    "WARNING: unable to map I/O space\n");
   2155 		}
   2156 		break;
   2157 	default:
   2158 		break;
   2159 	}
   2160 
   2161 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2162 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2163 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2164 	if (sc->sc_type < WM_T_82542_2_1)
   2165 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2166 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2167 
   2168 	/* Power up chip */
   2169 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2170 	    && error != EOPNOTSUPP) {
   2171 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2172 		return;
   2173 	}
   2174 
   2175 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
    2176 	/*
    2177 	 * To save interrupt resources, don't use MSI-X if we can
    2178 	 * use only one queue.
    2179 	 */
   2180 	if (sc->sc_nqueues > 1) {
   2181 		max_type = PCI_INTR_TYPE_MSIX;
    2182 		/*
    2183 		 * The 82583 has an MSI-X capability in the PCI configuration
    2184 		 * space but doesn't support it; at least the documentation
    2185 		 * says nothing about MSI-X.
    2186 		 */
   2187 		counts[PCI_INTR_TYPE_MSIX]
   2188 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2189 	} else {
   2190 		max_type = PCI_INTR_TYPE_MSI;
   2191 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2192 	}
   2193 
   2194 	/* Allocation settings */
   2195 	counts[PCI_INTR_TYPE_MSI] = 1;
   2196 	counts[PCI_INTR_TYPE_INTX] = 1;
    2197 	/* Overridden by the disable flags */
   2198 	if (wm_disable_msi != 0) {
   2199 		counts[PCI_INTR_TYPE_MSI] = 0;
   2200 		if (wm_disable_msix != 0) {
   2201 			max_type = PCI_INTR_TYPE_INTX;
   2202 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2203 		}
   2204 	} else if (wm_disable_msix != 0) {
   2205 		max_type = PCI_INTR_TYPE_MSI;
   2206 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2207 	}
   2208 
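         	/*
         	 * Allocate the interrupt with fallback: if MSI-X setup fails,
         	 * the vectors are released and allocation is retried with MSI,
         	 * and likewise from MSI down to INTx.
         	 */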
   2209 alloc_retry:
   2210 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2211 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2212 		return;
   2213 	}
   2214 
   2215 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2216 		error = wm_setup_msix(sc);
   2217 		if (error) {
   2218 			pci_intr_release(pc, sc->sc_intrs,
   2219 			    counts[PCI_INTR_TYPE_MSIX]);
   2220 
   2221 			/* Setup for MSI: Disable MSI-X */
   2222 			max_type = PCI_INTR_TYPE_MSI;
   2223 			counts[PCI_INTR_TYPE_MSI] = 1;
   2224 			counts[PCI_INTR_TYPE_INTX] = 1;
   2225 			goto alloc_retry;
   2226 		}
   2227 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2228 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2229 		error = wm_setup_legacy(sc);
   2230 		if (error) {
   2231 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2232 			    counts[PCI_INTR_TYPE_MSI]);
   2233 
   2234 			/* The next try is for INTx: Disable MSI */
   2235 			max_type = PCI_INTR_TYPE_INTX;
   2236 			counts[PCI_INTR_TYPE_INTX] = 1;
   2237 			goto alloc_retry;
   2238 		}
   2239 	} else {
   2240 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2241 		error = wm_setup_legacy(sc);
   2242 		if (error) {
   2243 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2244 			    counts[PCI_INTR_TYPE_INTX]);
   2245 			return;
   2246 		}
   2247 	}
   2248 
   2249 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2250 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2251 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2252 	    WQ_PERCPU | WQ_MPSAFE);
   2253 	if (error) {
   2254 		aprint_error_dev(sc->sc_dev,
   2255 		    "unable to create TxRx workqueue\n");
   2256 		goto out;
   2257 	}
   2258 
   2259 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2260 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2261 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2262 	    WQ_MPSAFE);
   2263 	if (error) {
   2264 		workqueue_destroy(sc->sc_queue_wq);
   2265 		aprint_error_dev(sc->sc_dev,
   2266 		    "unable to create reset workqueue\n");
   2267 		goto out;
   2268 	}
   2269 
   2270 	/*
   2271 	 * Check the function ID (unit number of the chip).
   2272 	 */
   2273 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2274 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2275 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2276 	    || (sc->sc_type == WM_T_82580)
   2277 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2278 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2279 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2280 	else
   2281 		sc->sc_funcid = 0;
   2282 
   2283 	/*
   2284 	 * Determine a few things about the bus we're connected to.
   2285 	 */
   2286 	if (sc->sc_type < WM_T_82543) {
   2287 		/* We don't really know the bus characteristics here. */
   2288 		sc->sc_bus_speed = 33;
   2289 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
    2290 		/*
    2291 		 * CSA (Communication Streaming Architecture) is about as
    2292 		 * fast as a 32-bit 66MHz PCI bus.
    2293 		 */
   2294 		sc->sc_flags |= WM_F_CSA;
   2295 		sc->sc_bus_speed = 66;
   2296 		aprint_verbose_dev(sc->sc_dev,
   2297 		    "Communication Streaming Architecture\n");
   2298 		if (sc->sc_type == WM_T_82547) {
   2299 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2300 			callout_setfunc(&sc->sc_txfifo_ch,
   2301 			    wm_82547_txfifo_stall, sc);
   2302 			aprint_verbose_dev(sc->sc_dev,
   2303 			    "using 82547 Tx FIFO stall work-around\n");
   2304 		}
   2305 	} else if (sc->sc_type >= WM_T_82571) {
   2306 		sc->sc_flags |= WM_F_PCIE;
   2307 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2308 		    && (sc->sc_type != WM_T_ICH10)
   2309 		    && (sc->sc_type != WM_T_PCH)
   2310 		    && (sc->sc_type != WM_T_PCH2)
   2311 		    && (sc->sc_type != WM_T_PCH_LPT)
   2312 		    && (sc->sc_type != WM_T_PCH_SPT)
   2313 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2314 			/* ICH* and PCH* have no PCIe capability registers */
   2315 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2316 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2317 				NULL) == 0)
   2318 				aprint_error_dev(sc->sc_dev,
   2319 				    "unable to find PCIe capability\n");
   2320 		}
   2321 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2322 	} else {
   2323 		reg = CSR_READ(sc, WMREG_STATUS);
   2324 		if (reg & STATUS_BUS64)
   2325 			sc->sc_flags |= WM_F_BUS64;
   2326 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2327 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2328 
   2329 			sc->sc_flags |= WM_F_PCIX;
   2330 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2331 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2332 				aprint_error_dev(sc->sc_dev,
   2333 				    "unable to find PCIX capability\n");
   2334 			else if (sc->sc_type != WM_T_82545_3 &&
   2335 			    sc->sc_type != WM_T_82546_3) {
   2336 				/*
   2337 				 * Work around a problem caused by the BIOS
   2338 				 * setting the max memory read byte count
   2339 				 * incorrectly.
   2340 				 */
   2341 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2342 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2343 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2344 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2345 
   2346 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2347 				    PCIX_CMD_BYTECNT_SHIFT;
   2348 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2349 				    PCIX_STATUS_MAXB_SHIFT;
   2350 				if (bytecnt > maxb) {
   2351 					aprint_verbose_dev(sc->sc_dev,
   2352 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2353 					    512 << bytecnt, 512 << maxb);
   2354 					pcix_cmd = (pcix_cmd &
   2355 					    ~PCIX_CMD_BYTECNT_MASK) |
   2356 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2357 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2358 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2359 					    pcix_cmd);
   2360 				}
   2361 			}
   2362 		}
   2363 		/*
   2364 		 * The quad port adapter is special; it has a PCIX-PCIX
   2365 		 * bridge on the board, and can run the secondary bus at
   2366 		 * a higher speed.
   2367 		 */
   2368 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2369 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2370 								      : 66;
   2371 		} else if (sc->sc_flags & WM_F_PCIX) {
   2372 			switch (reg & STATUS_PCIXSPD_MASK) {
   2373 			case STATUS_PCIXSPD_50_66:
   2374 				sc->sc_bus_speed = 66;
   2375 				break;
   2376 			case STATUS_PCIXSPD_66_100:
   2377 				sc->sc_bus_speed = 100;
   2378 				break;
   2379 			case STATUS_PCIXSPD_100_133:
   2380 				sc->sc_bus_speed = 133;
   2381 				break;
   2382 			default:
   2383 				aprint_error_dev(sc->sc_dev,
   2384 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2385 				    reg & STATUS_PCIXSPD_MASK);
   2386 				sc->sc_bus_speed = 66;
   2387 				break;
   2388 			}
   2389 		} else
   2390 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2391 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2392 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2393 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2394 	}
   2395 
   2396 	/* clear interesting stat counters */
   2397 	CSR_READ(sc, WMREG_COLC);
   2398 	CSR_READ(sc, WMREG_RXERRC);
   2399 
   2400 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2401 	    || (sc->sc_type >= WM_T_ICH8))
   2402 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2403 	if (sc->sc_type >= WM_T_ICH8)
   2404 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2405 
   2406 	/* Set PHY, NVM mutex related stuff */
   2407 	switch (sc->sc_type) {
   2408 	case WM_T_82542_2_0:
   2409 	case WM_T_82542_2_1:
   2410 	case WM_T_82543:
   2411 	case WM_T_82544:
   2412 		/* Microwire */
   2413 		sc->nvm.read = wm_nvm_read_uwire;
   2414 		sc->sc_nvm_wordsize = 64;
   2415 		sc->sc_nvm_addrbits = 6;
   2416 		break;
   2417 	case WM_T_82540:
   2418 	case WM_T_82545:
   2419 	case WM_T_82545_3:
   2420 	case WM_T_82546:
   2421 	case WM_T_82546_3:
   2422 		/* Microwire */
   2423 		sc->nvm.read = wm_nvm_read_uwire;
   2424 		reg = CSR_READ(sc, WMREG_EECD);
   2425 		if (reg & EECD_EE_SIZE) {
   2426 			sc->sc_nvm_wordsize = 256;
   2427 			sc->sc_nvm_addrbits = 8;
   2428 		} else {
   2429 			sc->sc_nvm_wordsize = 64;
   2430 			sc->sc_nvm_addrbits = 6;
   2431 		}
   2432 		sc->sc_flags |= WM_F_LOCK_EECD;
   2433 		sc->nvm.acquire = wm_get_eecd;
   2434 		sc->nvm.release = wm_put_eecd;
   2435 		break;
   2436 	case WM_T_82541:
   2437 	case WM_T_82541_2:
   2438 	case WM_T_82547:
   2439 	case WM_T_82547_2:
   2440 		reg = CSR_READ(sc, WMREG_EECD);
    2441 		/*
    2442 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2443 		 * 8254[17], so set the flags and functions before calling it.
    2444 		 */
   2445 		sc->sc_flags |= WM_F_LOCK_EECD;
   2446 		sc->nvm.acquire = wm_get_eecd;
   2447 		sc->nvm.release = wm_put_eecd;
   2448 		if (reg & EECD_EE_TYPE) {
   2449 			/* SPI */
   2450 			sc->nvm.read = wm_nvm_read_spi;
   2451 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2452 			wm_nvm_set_addrbits_size_eecd(sc);
   2453 		} else {
   2454 			/* Microwire */
   2455 			sc->nvm.read = wm_nvm_read_uwire;
   2456 			if ((reg & EECD_EE_ABITS) != 0) {
   2457 				sc->sc_nvm_wordsize = 256;
   2458 				sc->sc_nvm_addrbits = 8;
   2459 			} else {
   2460 				sc->sc_nvm_wordsize = 64;
   2461 				sc->sc_nvm_addrbits = 6;
   2462 			}
   2463 		}
   2464 		break;
   2465 	case WM_T_82571:
   2466 	case WM_T_82572:
   2467 		/* SPI */
   2468 		sc->nvm.read = wm_nvm_read_eerd;
    2469 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2470 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2471 		wm_nvm_set_addrbits_size_eecd(sc);
   2472 		sc->phy.acquire = wm_get_swsm_semaphore;
   2473 		sc->phy.release = wm_put_swsm_semaphore;
   2474 		sc->nvm.acquire = wm_get_nvm_82571;
   2475 		sc->nvm.release = wm_put_nvm_82571;
   2476 		break;
   2477 	case WM_T_82573:
   2478 	case WM_T_82574:
   2479 	case WM_T_82583:
   2480 		sc->nvm.read = wm_nvm_read_eerd;
    2481 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2482 		if (sc->sc_type == WM_T_82573) {
   2483 			sc->phy.acquire = wm_get_swsm_semaphore;
   2484 			sc->phy.release = wm_put_swsm_semaphore;
   2485 			sc->nvm.acquire = wm_get_nvm_82571;
   2486 			sc->nvm.release = wm_put_nvm_82571;
   2487 		} else {
   2488 			/* Both PHY and NVM use the same semaphore. */
   2489 			sc->phy.acquire = sc->nvm.acquire
   2490 			    = wm_get_swfwhw_semaphore;
   2491 			sc->phy.release = sc->nvm.release
   2492 			    = wm_put_swfwhw_semaphore;
   2493 		}
   2494 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2495 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2496 			sc->sc_nvm_wordsize = 2048;
   2497 		} else {
   2498 			/* SPI */
   2499 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2500 			wm_nvm_set_addrbits_size_eecd(sc);
   2501 		}
   2502 		break;
   2503 	case WM_T_82575:
   2504 	case WM_T_82576:
   2505 	case WM_T_82580:
   2506 	case WM_T_I350:
   2507 	case WM_T_I354:
   2508 	case WM_T_80003:
   2509 		/* SPI */
   2510 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2511 		wm_nvm_set_addrbits_size_eecd(sc);
   2512 		if ((sc->sc_type == WM_T_80003)
   2513 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2514 			sc->nvm.read = wm_nvm_read_eerd;
   2515 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2516 		} else {
   2517 			sc->nvm.read = wm_nvm_read_spi;
   2518 			sc->sc_flags |= WM_F_LOCK_EECD;
   2519 		}
   2520 		sc->phy.acquire = wm_get_phy_82575;
   2521 		sc->phy.release = wm_put_phy_82575;
   2522 		sc->nvm.acquire = wm_get_nvm_80003;
   2523 		sc->nvm.release = wm_put_nvm_80003;
   2524 		break;
   2525 	case WM_T_ICH8:
   2526 	case WM_T_ICH9:
   2527 	case WM_T_ICH10:
   2528 	case WM_T_PCH:
   2529 	case WM_T_PCH2:
   2530 	case WM_T_PCH_LPT:
   2531 		sc->nvm.read = wm_nvm_read_ich8;
   2532 		/* FLASH */
   2533 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2534 		sc->sc_nvm_wordsize = 2048;
   2535 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2536 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2537 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2538 			aprint_error_dev(sc->sc_dev,
   2539 			    "can't map FLASH registers\n");
   2540 			goto out;
   2541 		}
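         		/*
         		 * Compute the flash base and the per-bank size from
         		 * GFPREG, which holds the first and the last sectors
         		 * of the gigabit flash region; the result is in
         		 * 16-bit words, assuming two flash banks.
         		 */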
   2542 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2543 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2544 		    ICH_FLASH_SECTOR_SIZE;
   2545 		sc->sc_ich8_flash_bank_size =
   2546 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2547 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2548 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2549 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2550 		sc->sc_flashreg_offset = 0;
   2551 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2552 		sc->phy.release = wm_put_swflag_ich8lan;
   2553 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2554 		sc->nvm.release = wm_put_nvm_ich8lan;
   2555 		break;
   2556 	case WM_T_PCH_SPT:
   2557 	case WM_T_PCH_CNP:
   2558 		sc->nvm.read = wm_nvm_read_spt;
   2559 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2560 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2561 		sc->sc_flasht = sc->sc_st;
   2562 		sc->sc_flashh = sc->sc_sh;
   2563 		sc->sc_ich8_flash_base = 0;
   2564 		sc->sc_nvm_wordsize =
   2565 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2566 		    * NVM_SIZE_MULTIPLIER;
    2567 		/* It is the size in bytes; we want words */
   2568 		sc->sc_nvm_wordsize /= 2;
   2569 		/* Assume 2 banks */
   2570 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2571 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2572 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2573 		sc->phy.release = wm_put_swflag_ich8lan;
   2574 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2575 		sc->nvm.release = wm_put_nvm_ich8lan;
   2576 		break;
   2577 	case WM_T_I210:
   2578 	case WM_T_I211:
    2579 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2580 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2581 		if (wm_nvm_flash_presence_i210(sc)) {
   2582 			sc->nvm.read = wm_nvm_read_eerd;
   2583 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2584 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2585 			wm_nvm_set_addrbits_size_eecd(sc);
   2586 		} else {
   2587 			sc->nvm.read = wm_nvm_read_invm;
   2588 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2589 			sc->sc_nvm_wordsize = INVM_SIZE;
   2590 		}
   2591 		sc->phy.acquire = wm_get_phy_82575;
   2592 		sc->phy.release = wm_put_phy_82575;
   2593 		sc->nvm.acquire = wm_get_nvm_80003;
   2594 		sc->nvm.release = wm_put_nvm_80003;
   2595 		break;
   2596 	default:
   2597 		break;
   2598 	}
   2599 
   2600 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2601 	switch (sc->sc_type) {
   2602 	case WM_T_82571:
   2603 	case WM_T_82572:
   2604 		reg = CSR_READ(sc, WMREG_SWSM2);
   2605 		if ((reg & SWSM2_LOCK) == 0) {
   2606 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2607 			force_clear_smbi = true;
   2608 		} else
   2609 			force_clear_smbi = false;
   2610 		break;
   2611 	case WM_T_82573:
   2612 	case WM_T_82574:
   2613 	case WM_T_82583:
   2614 		force_clear_smbi = true;
   2615 		break;
   2616 	default:
   2617 		force_clear_smbi = false;
   2618 		break;
   2619 	}
   2620 	if (force_clear_smbi) {
   2621 		reg = CSR_READ(sc, WMREG_SWSM);
   2622 		if ((reg & SWSM_SMBI) != 0)
   2623 			aprint_error_dev(sc->sc_dev,
   2624 			    "Please update the Bootagent\n");
   2625 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2626 	}
   2627 
    2628 	/*
    2629 	 * Defer printing the EEPROM type until after verifying the
    2630 	 * checksum.  This allows the EEPROM type to be printed correctly
    2631 	 * in the case that no EEPROM is attached.
    2632 	 */
   2633 	/*
   2634 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2635 	 * this for later, so we can fail future reads from the EEPROM.
   2636 	 */
   2637 	if (wm_nvm_validate_checksum(sc)) {
    2638 		/*
    2639 		 * Validate a second time, because some PCI-e parts fail
    2640 		 * the first check due to the link being in a sleep state.
    2641 		 */
   2642 		if (wm_nvm_validate_checksum(sc))
   2643 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2644 	}
   2645 
   2646 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2647 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2648 	else {
   2649 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2650 		    sc->sc_nvm_wordsize);
   2651 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2652 			aprint_verbose("iNVM");
   2653 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2654 			aprint_verbose("FLASH(HW)");
   2655 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2656 			aprint_verbose("FLASH");
   2657 		else {
   2658 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2659 				eetype = "SPI";
   2660 			else
   2661 				eetype = "MicroWire";
   2662 			aprint_verbose("(%d address bits) %s EEPROM",
   2663 			    sc->sc_nvm_addrbits, eetype);
   2664 		}
   2665 	}
   2666 	wm_nvm_version(sc);
   2667 	aprint_verbose("\n");
   2668 
   2669 	/*
    2670 	 * XXX This is the first call to wm_gmii_setup_phytype(); the
    2671 	 * result might be incorrect.
   2672 	 */
   2673 	wm_gmii_setup_phytype(sc, 0, 0);
   2674 
   2675 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2676 	switch (sc->sc_type) {
   2677 	case WM_T_ICH8:
   2678 	case WM_T_ICH9:
   2679 	case WM_T_ICH10:
   2680 	case WM_T_PCH:
   2681 	case WM_T_PCH2:
   2682 	case WM_T_PCH_LPT:
   2683 	case WM_T_PCH_SPT:
   2684 	case WM_T_PCH_CNP:
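         		/* On ICH/PCH, the APM enable bit is checked in the WUC
         		 * register rather than in the NVM. */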
   2685 		apme_mask = WUC_APME;
   2686 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2687 		if ((eeprom_data & apme_mask) != 0)
   2688 			sc->sc_flags |= WM_F_WOL;
   2689 		break;
   2690 	default:
   2691 		break;
   2692 	}
   2693 
   2694 	/* Reset the chip to a known state. */
   2695 	wm_reset(sc);
   2696 
   2697 	/*
   2698 	 * Check for I21[01] PLL workaround.
   2699 	 *
   2700 	 * Three cases:
   2701 	 * a) Chip is I211.
   2702 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2703 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2704 	 */
   2705 	if (sc->sc_type == WM_T_I211)
   2706 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2707 	if (sc->sc_type == WM_T_I210) {
   2708 		if (!wm_nvm_flash_presence_i210(sc))
   2709 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2710 		else if ((sc->sc_nvm_ver_major < 3)
   2711 		    || ((sc->sc_nvm_ver_major == 3)
   2712 			&& (sc->sc_nvm_ver_minor < 25))) {
   2713 			aprint_verbose_dev(sc->sc_dev,
   2714 			    "ROM image version %d.%d is older than 3.25\n",
   2715 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2716 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2717 		}
   2718 	}
   2719 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2720 		wm_pll_workaround_i210(sc);
   2721 
   2722 	wm_get_wakeup(sc);
   2723 
   2724 	/* Non-AMT based hardware can now take control from firmware */
   2725 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2726 		wm_get_hw_control(sc);
   2727 
   2728 	/*
    2729 	 * Read the Ethernet address from the EEPROM unless it was
    2730 	 * already found in the device properties.
   2731 	 */
   2732 	ea = prop_dictionary_get(dict, "mac-address");
   2733 	if (ea != NULL) {
   2734 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2735 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2736 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2737 	} else {
   2738 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2739 			aprint_error_dev(sc->sc_dev,
   2740 			    "unable to read Ethernet address\n");
   2741 			goto out;
   2742 		}
   2743 	}
   2744 
   2745 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2746 	    ether_sprintf(enaddr));
   2747 
   2748 	/*
   2749 	 * Read the config info from the EEPROM, and set up various
   2750 	 * bits in the control registers based on their contents.
   2751 	 */
   2752 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2753 	if (pn != NULL) {
   2754 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2755 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2756 	} else {
   2757 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2758 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2759 			goto out;
   2760 		}
   2761 	}
   2762 
   2763 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2764 	if (pn != NULL) {
   2765 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2766 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2767 	} else {
   2768 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2769 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2770 			goto out;
   2771 		}
   2772 	}
   2773 
    2774 	/* Check for WM_F_WOL */
   2775 	switch (sc->sc_type) {
   2776 	case WM_T_82542_2_0:
   2777 	case WM_T_82542_2_1:
   2778 	case WM_T_82543:
   2779 		/* dummy? */
   2780 		eeprom_data = 0;
   2781 		apme_mask = NVM_CFG3_APME;
   2782 		break;
   2783 	case WM_T_82544:
   2784 		apme_mask = NVM_CFG2_82544_APM_EN;
   2785 		eeprom_data = cfg2;
   2786 		break;
   2787 	case WM_T_82546:
   2788 	case WM_T_82546_3:
   2789 	case WM_T_82571:
   2790 	case WM_T_82572:
   2791 	case WM_T_82573:
   2792 	case WM_T_82574:
   2793 	case WM_T_82583:
   2794 	case WM_T_80003:
   2795 	case WM_T_82575:
   2796 	case WM_T_82576:
   2797 		apme_mask = NVM_CFG3_APME;
   2798 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2799 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2800 		break;
   2801 	case WM_T_82580:
   2802 	case WM_T_I350:
   2803 	case WM_T_I354:
   2804 	case WM_T_I210:
   2805 	case WM_T_I211:
   2806 		apme_mask = NVM_CFG3_APME;
   2807 		wm_nvm_read(sc,
   2808 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2809 		    1, &eeprom_data);
   2810 		break;
   2811 	case WM_T_ICH8:
   2812 	case WM_T_ICH9:
   2813 	case WM_T_ICH10:
   2814 	case WM_T_PCH:
   2815 	case WM_T_PCH2:
   2816 	case WM_T_PCH_LPT:
   2817 	case WM_T_PCH_SPT:
   2818 	case WM_T_PCH_CNP:
    2819 		/* Already checked above, before wm_reset() */
   2820 		apme_mask = eeprom_data = 0;
   2821 		break;
   2822 	default: /* XXX 82540 */
   2823 		apme_mask = NVM_CFG3_APME;
   2824 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2825 		break;
   2826 	}
   2827 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2828 	if ((eeprom_data & apme_mask) != 0)
   2829 		sc->sc_flags |= WM_F_WOL;
   2830 
   2831 	/*
    2832 	 * We have the EEPROM settings; now apply the special cases
    2833 	 * where the EEPROM may be wrong or the board doesn't support
    2834 	 * wake-on-LAN on a particular port.
   2835 	 */
   2836 	switch (sc->sc_pcidevid) {
   2837 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2838 		sc->sc_flags &= ~WM_F_WOL;
   2839 		break;
   2840 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2841 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2842 		/* Wake events are only supported on port A of dual-fiber
    2843 		 * boards, regardless of the EEPROM setting. */
   2844 		if (sc->sc_funcid == 1)
   2845 			sc->sc_flags &= ~WM_F_WOL;
   2846 		break;
   2847 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2848 		/* If quad port adapter, disable WoL on all but port A */
   2849 		if (sc->sc_funcid != 0)
   2850 			sc->sc_flags &= ~WM_F_WOL;
   2851 		break;
   2852 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2853 		/* Wake events are only supported on port A of dual-fiber
    2854 		 * boards, regardless of the EEPROM setting. */
   2855 		if (sc->sc_funcid == 1)
   2856 			sc->sc_flags &= ~WM_F_WOL;
   2857 		break;
   2858 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2859 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2860 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2861 		/* If quad port adapter, disable WoL on all but port A */
   2862 		if (sc->sc_funcid != 0)
   2863 			sc->sc_flags &= ~WM_F_WOL;
   2864 		break;
   2865 	}
   2866 
   2867 	if (sc->sc_type >= WM_T_82575) {
   2868 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2869 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2870 			    nvmword);
   2871 			if ((sc->sc_type == WM_T_82575) ||
   2872 			    (sc->sc_type == WM_T_82576)) {
   2873 				/* Check NVM for autonegotiation */
   2874 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2875 				    != 0)
   2876 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2877 			}
   2878 			if ((sc->sc_type == WM_T_82575) ||
   2879 			    (sc->sc_type == WM_T_I350)) {
   2880 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2881 					sc->sc_flags |= WM_F_MAS;
   2882 			}
   2883 		}
   2884 	}
   2885 
   2886 	/*
    2887 	 * XXX Some multi-port cards need special handling to disable
    2888 	 * a particular port.
   2889 	 */
   2890 
   2891 	if (sc->sc_type >= WM_T_82544) {
   2892 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2893 		if (pn != NULL) {
   2894 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2895 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2896 		} else {
   2897 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2898 				aprint_error_dev(sc->sc_dev,
   2899 				    "unable to read SWDPIN\n");
   2900 				goto out;
   2901 			}
   2902 		}
   2903 	}
   2904 
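         	/* CTRL_ILOS inverts the loss-of-signal (LOS) input polarity. */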
   2905 	if (cfg1 & NVM_CFG1_ILOS)
   2906 		sc->sc_ctrl |= CTRL_ILOS;
   2907 
   2908 	/*
   2909 	 * XXX
    2910 	 * This code isn't correct because pins 2 and 3 are located in
    2911 	 * different positions on newer chips; check all the datasheets.
    2912 	 *
    2913 	 * Until this is resolved, only apply it to chips up to the 82580.
   2914 	 */
   2915 	if (sc->sc_type <= WM_T_82580) {
   2916 		if (sc->sc_type >= WM_T_82544) {
   2917 			sc->sc_ctrl |=
   2918 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2919 			    CTRL_SWDPIO_SHIFT;
   2920 			sc->sc_ctrl |=
   2921 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2922 			    CTRL_SWDPINS_SHIFT;
   2923 		} else {
   2924 			sc->sc_ctrl |=
   2925 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2926 			    CTRL_SWDPIO_SHIFT;
   2927 		}
   2928 	}
   2929 
   2930 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2931 		wm_nvm_read(sc,
   2932 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2933 		    1, &nvmword);
   2934 		if (nvmword & NVM_CFG3_ILOS)
   2935 			sc->sc_ctrl |= CTRL_ILOS;
   2936 	}
   2937 
   2938 #if 0
   2939 	if (sc->sc_type >= WM_T_82544) {
   2940 		if (cfg1 & NVM_CFG1_IPS0)
   2941 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2942 		if (cfg1 & NVM_CFG1_IPS1)
   2943 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2944 		sc->sc_ctrl_ext |=
   2945 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2946 		    CTRL_EXT_SWDPIO_SHIFT;
   2947 		sc->sc_ctrl_ext |=
   2948 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2949 		    CTRL_EXT_SWDPINS_SHIFT;
   2950 	} else {
   2951 		sc->sc_ctrl_ext |=
   2952 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2953 		    CTRL_EXT_SWDPIO_SHIFT;
   2954 	}
   2955 #endif
   2956 
   2957 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2958 #if 0
   2959 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2960 #endif
   2961 
   2962 	if (sc->sc_type == WM_T_PCH) {
   2963 		uint16_t val;
   2964 
   2965 		/* Save the NVM K1 bit setting */
   2966 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2967 
   2968 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2969 			sc->sc_nvm_k1_enabled = 1;
   2970 		else
   2971 			sc->sc_nvm_k1_enabled = 0;
   2972 	}
   2973 
    2974 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2975 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2976 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2977 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2978 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2979 	    || sc->sc_type == WM_T_82573
   2980 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2981 		/* Copper only */
   2982 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2983 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2984 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2985 	    || (sc->sc_type == WM_T_I211)) {
   2986 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2987 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2988 		switch (link_mode) {
   2989 		case CTRL_EXT_LINK_MODE_1000KX:
   2990 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2991 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2992 			break;
   2993 		case CTRL_EXT_LINK_MODE_SGMII:
   2994 			if (wm_sgmii_uses_mdio(sc)) {
   2995 				aprint_normal_dev(sc->sc_dev,
   2996 				    "SGMII(MDIO)\n");
   2997 				sc->sc_flags |= WM_F_SGMII;
   2998 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2999 				break;
   3000 			}
   3001 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3002 			/*FALLTHROUGH*/
   3003 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3004 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3005 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3006 				if (link_mode
   3007 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3008 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3009 					sc->sc_flags |= WM_F_SGMII;
   3010 					aprint_verbose_dev(sc->sc_dev,
   3011 					    "SGMII\n");
   3012 				} else {
   3013 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3014 					aprint_verbose_dev(sc->sc_dev,
   3015 					    "SERDES\n");
   3016 				}
   3017 				break;
   3018 			}
   3019 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3020 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3021 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3022 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3023 				sc->sc_flags |= WM_F_SGMII;
   3024 			}
   3025 			/* Do not change link mode for 100BaseFX */
   3026 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3027 				break;
   3028 
   3029 			/* Change current link mode setting */
   3030 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3031 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3032 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3033 			else
   3034 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3035 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3036 			break;
   3037 		case CTRL_EXT_LINK_MODE_GMII:
   3038 		default:
   3039 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3040 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3041 			break;
   3042 		}
   3043 
    3045 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3046 			reg |= CTRL_EXT_I2C_ENA;
    3047 		else
    3048 			reg &= ~CTRL_EXT_I2C_ENA;
   3049 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3050 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3051 			if (!wm_sgmii_uses_mdio(sc))
   3052 				wm_gmii_setup_phytype(sc, 0, 0);
   3053 			wm_reset_mdicnfg_82580(sc);
   3054 		}
   3055 	} else if (sc->sc_type < WM_T_82543 ||
   3056 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3057 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3058 			aprint_error_dev(sc->sc_dev,
   3059 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3060 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3061 		}
   3062 	} else {
   3063 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3064 			aprint_error_dev(sc->sc_dev,
   3065 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3066 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3067 		}
   3068 	}
   3069 
   3070 	if (sc->sc_type >= WM_T_PCH2)
   3071 		sc->sc_flags |= WM_F_EEE;
   3072 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3073 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3074 		/* XXX: Need special handling for I354. (not yet) */
   3075 		if (sc->sc_type != WM_T_I354)
   3076 			sc->sc_flags |= WM_F_EEE;
   3077 	}
   3078 
   3079 	/*
    3080 	 * The I350 has a bug where it always strips the CRC whether
    3081 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   3082 	 */
   3083 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3084 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3085 		sc->sc_flags |= WM_F_CRC_STRIP;
   3086 
   3087 	/* Set device properties (macflags) */
   3088 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3089 
   3090 	if (sc->sc_flags != 0) {
   3091 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3092 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3093 	}
   3094 
   3095 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3096 
   3097 	/* Initialize the media structures accordingly. */
   3098 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3099 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3100 	else
   3101 		wm_tbi_mediainit(sc); /* All others */
   3102 
   3103 	ifp = &sc->sc_ethercom.ec_if;
   3104 	xname = device_xname(sc->sc_dev);
   3105 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3106 	ifp->if_softc = sc;
   3107 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3108 	ifp->if_extflags = IFEF_MPSAFE;
   3109 	ifp->if_ioctl = wm_ioctl;
   3110 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3111 		ifp->if_start = wm_nq_start;
   3112 		/*
   3113 		 * When the number of CPUs is one and the controller can use
    3114 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3115 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3116 		 * the other for link status changes.
    3117 		 * In this situation, wm_nq_transmit() is disadvantageous
    3118 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   3119 		 */
   3120 		if (wm_is_using_multiqueue(sc))
   3121 			ifp->if_transmit = wm_nq_transmit;
   3122 	} else {
   3123 		ifp->if_start = wm_start;
   3124 		/*
   3125 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3126 		 * described above.
   3127 		 */
   3128 		if (wm_is_using_multiqueue(sc))
   3129 			ifp->if_transmit = wm_transmit;
   3130 	}
    3131 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as the watchdog. */
   3132 	ifp->if_init = wm_init;
   3133 	ifp->if_stop = wm_stop;
   3134 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3135 	IFQ_SET_READY(&ifp->if_snd);
   3136 
    3137 	/* Check for jumbo frame support */
   3138 	switch (sc->sc_type) {
   3139 	case WM_T_82573:
   3140 		/* XXX limited to 9234 if ASPM is disabled */
   3141 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3142 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3143 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3144 		break;
   3145 	case WM_T_82571:
   3146 	case WM_T_82572:
   3147 	case WM_T_82574:
   3148 	case WM_T_82583:
   3149 	case WM_T_82575:
   3150 	case WM_T_82576:
   3151 	case WM_T_82580:
   3152 	case WM_T_I350:
   3153 	case WM_T_I354:
   3154 	case WM_T_I210:
   3155 	case WM_T_I211:
   3156 	case WM_T_80003:
   3157 	case WM_T_ICH9:
   3158 	case WM_T_ICH10:
   3159 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3160 	case WM_T_PCH_LPT:
   3161 	case WM_T_PCH_SPT:
   3162 	case WM_T_PCH_CNP:
   3163 		/* XXX limited to 9234 */
   3164 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3165 		break;
   3166 	case WM_T_PCH:
   3167 		/* XXX limited to 4096 */
   3168 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3169 		break;
   3170 	case WM_T_82542_2_0:
   3171 	case WM_T_82542_2_1:
   3172 	case WM_T_ICH8:
   3173 		/* No support for jumbo frame */
   3174 		break;
   3175 	default:
   3176 		/* ETHER_MAX_LEN_JUMBO */
   3177 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3178 		break;
   3179 	}
   3180 
    3181 	/* If we're an i82543 or greater, we can support VLANs. */
   3182 	if (sc->sc_type >= WM_T_82543) {
   3183 		sc->sc_ethercom.ec_capabilities |=
   3184 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3185 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3186 	}
   3187 
   3188 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3189 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3190 
   3191 	/*
    3192 	 * We can perform IPv4, TCPv4 and UDPv4 checksum offload in both
    3193 	 * directions (plus TCPv6/UDPv6 on Tx), but only on i82543 and later.
   3194 	 */
   3195 	if (sc->sc_type >= WM_T_82543) {
   3196 		ifp->if_capabilities |=
   3197 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3198 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3199 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3200 		    IFCAP_CSUM_TCPv6_Tx |
   3201 		    IFCAP_CSUM_UDPv6_Tx;
   3202 	}
   3203 
   3204 	/*
    3205 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3206 	 *
   3207 	 *	82541GI (8086:1076) ... no
   3208 	 *	82572EI (8086:10b9) ... yes
   3209 	 */
   3210 	if (sc->sc_type >= WM_T_82571) {
   3211 		ifp->if_capabilities |=
   3212 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3213 	}
   3214 
   3215 	/*
    3216 	 * If we're an i82544 or greater (except the i82547), we can do
   3217 	 * TCP segmentation offload.
   3218 	 */
   3219 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3220 		ifp->if_capabilities |= IFCAP_TSOv4;
   3221 
   3222 	if (sc->sc_type >= WM_T_82571)
   3223 		ifp->if_capabilities |= IFCAP_TSOv6;
   3224 
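         	/*
         	 * Default per-pass processing limits: these bound how many
         	 * Tx/Rx descriptors are handled per softint or hardware
         	 * interrupt invocation, and can be tuned via sysctl.
         	 */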
   3225 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3226 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3227 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3228 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3229 
   3230 	/* Attach the interface. */
   3231 	if_initialize(ifp);
   3232 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3233 	ether_ifattach(ifp, enaddr);
   3234 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3235 	if_register(ifp);
   3236 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3237 	    RND_FLAG_DEFAULT);
   3238 
   3239 #ifdef WM_EVENT_COUNTERS
   3240 	/* Attach event counters. */
   3241 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3242 	    NULL, xname, "linkintr");
   3243 
   3244 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "CRC Error");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Symbol Error");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Missed Packets");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Collision");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Sequence Error");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Receive Length Error");
   3256 
   3257 	if (sc->sc_type >= WM_T_82543) {
   3258 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3259 		    NULL, xname, "Alignment Error");
   3260 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3261 		    NULL, xname, "Receive Error");
   3262 		/* XXX Does 82575 have HTDPMC? */
   3263 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3264 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3265 			    EVCNT_TYPE_MISC, NULL, xname,
   3266 			    "Carrier Extension Error");
   3267 		else
   3268 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3269 			    EVCNT_TYPE_MISC, NULL, xname,
   3270 			    "Host Transmit Discarded Packets by MAC");
   3271 
   3272 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3273 		    NULL, xname, "Tx with No CRS");
   3274 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3275 		    NULL, xname, "TCP Segmentation Context Tx");
   3276 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3277 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3278 			    EVCNT_TYPE_MISC, NULL, xname,
   3279 			    "TCP Segmentation Context Tx Fail");
   3280 		else {
   3281 			/* XXX Is the circuit breaker only for 82576? */
   3282 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3283 			    EVCNT_TYPE_MISC, NULL, xname,
   3284 			    "Circuit Breaker Rx Dropped Packet");
   3285 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3286 			    EVCNT_TYPE_MISC, NULL, xname,
   3287 			    "Circuit Breaker Rx Manageability Packet");
   3288 		}
   3289 	}
   3290 
   3291 	if (sc->sc_type >= WM_T_82542_2_1) {
   3292 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3293 		    NULL, xname, "tx_xoff");
   3294 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3295 		    NULL, xname, "tx_xon");
   3296 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3297 		    NULL, xname, "rx_xoff");
   3298 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3299 		    NULL, xname, "rx_xon");
   3300 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3301 		    NULL, xname, "rx_macctl");
   3302 	}
   3303 
   3304 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Single Collision");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Excessive Collisions");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Multiple Collision");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3311 	    NULL, xname, "Late Collisions");
   3312 
   3313 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3314 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3315 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3316 
   3317 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3318 	    NULL, xname, "Defer");
   3319 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3320 	    NULL, xname, "Packets Rx (64 bytes)");
   3321 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3322 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3323 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3324 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3325 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3326 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3327 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3328 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3329 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3330 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3331 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3332 	    NULL, xname, "Good Packets Rx");
   3333 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3334 	    NULL, xname, "Broadcast Packets Rx");
   3335 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Multicast Packets Rx");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Good Packets Tx");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Good Octets Rx");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Good Octets Tx");
   3343 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3344 	    NULL, xname, "Rx No Buffers");
   3345 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3346 	    NULL, xname, "Rx Undersize");
   3347 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3348 	    NULL, xname, "Rx Fragment");
   3349 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3350 	    NULL, xname, "Rx Oversize");
   3351 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3352 	    NULL, xname, "Rx Jabber");
   3353 	if (sc->sc_type >= WM_T_82540) {
   3354 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3355 		    NULL, xname, "Management Packets RX");
   3356 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3357 		    NULL, xname, "Management Packets Dropped");
   3358 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3359 		    NULL, xname, "Management Packets TX");
   3360 	}
   3361 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3362 	    NULL, xname, "Total Octets Rx");
   3363 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3364 	    NULL, xname, "Total Octets Tx");
   3365 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Total Packets Rx");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Total Packets Tx");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Packets Tx (64 bytes)");
   3371 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3372 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3373 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3374 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3375 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3376 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3377 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3378 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3379 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Multicast Packets Tx");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Broadcast Packets Tx");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Interrupt Assertion");
   3387 	if (sc->sc_type < WM_T_82575) {
   3388 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3389 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3390 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3391 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3392 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3393 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3394 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3395 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3396 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3397 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3398 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3399 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3400 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3401 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3402 
    3403 		/* XXX The 82575 datasheet says it has ICRXOC. Is that right? */
   3404 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3405 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3406 	} else if (!WM_IS_ICHPCH(sc)) {
   3407 		/*
   3408 		 * For 82575 and newer.
   3409 		 *
   3410 		 * On 80003, ICHs and PCHs, it seems all of the following
   3411 		 * registers are zero.
   3412 		 */
   3413 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3414 		    NULL, xname, "Rx Packets To Host");
   3415 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3416 		    NULL, xname, "Debug Counter 1");
   3417 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3418 		    NULL, xname, "Debug Counter 2");
   3419 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3420 		    NULL, xname, "Debug Counter 3");
   3421 
   3422 		/*
    3423 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3424 		 * Empty). I think that's wrong: the real count I observed
    3425 		 * matches GPTC (Good Packets Tx) and TPT (Total Packets
    3426 		 * Tx), so it's HGPTC (Host Good Packets Tx), which is
    3427 		 * described in the 82576 datasheet.
   3428 		 */
   3429 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3430 		    NULL, xname, "Host Good Packets TX");
   3431 
   3432 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3433 		    NULL, xname, "Debug Counter 4");
   3434 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3435 		    NULL, xname, "Rx Desc Min Thresh");
   3436 		/* XXX Is the circuit breaker only for 82576? */
   3437 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3438 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3439 
   3440 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3441 		    NULL, xname, "Host Good Octets Rx");
   3442 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3443 		    NULL, xname, "Host Good Octets Tx");
   3444 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3445 		    NULL, xname, "Length Errors");
   3446 	}
   3447 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3448 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3449 		    NULL, xname, "EEE Tx LPI");
   3450 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3451 		    NULL, xname, "EEE Rx LPI");
   3452 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3453 		    NULL, xname, "BMC2OS Packets received by host");
   3454 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3455 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3456 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3457 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3458 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3459 		    NULL, xname, "OS2BMC Packets received by BMC");
   3460 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3461 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3462 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3463 		    NULL, xname, "Header Redirection Missed Packet");
   3464 	}
   3465 #endif /* WM_EVENT_COUNTERS */
   3466 
   3467 	sc->sc_txrx_use_workqueue = false;
   3468 
   3469 	if (wm_phy_need_linkdown_discard(sc)) {
   3470 		DPRINTF(sc, WM_DEBUG_LINK,
   3471 		    ("%s: %s: Set linkdown discard flag\n",
   3472 			device_xname(sc->sc_dev), __func__));
   3473 		wm_set_linkdown_discard(sc);
   3474 	}
   3475 
   3476 	wm_init_sysctls(sc);
   3477 
   3478 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3479 		pmf_class_network_register(self, ifp);
   3480 	else
   3481 		aprint_error_dev(self, "couldn't establish power handler\n");
   3482 
   3483 	sc->sc_flags |= WM_F_ATTACHED;
   3484 out:
   3485 	return;
   3486 }
   3487 
   3488 /* The detach function (ca_detach) */
   3489 static int
   3490 wm_detach(device_t self, int flags __unused)
   3491 {
   3492 	struct wm_softc *sc = device_private(self);
   3493 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3494 	int i;
   3495 
   3496 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3497 		return 0;
   3498 
   3499 	/* Stop the interface. Callouts are stopped in it. */
   3500 	IFNET_LOCK(ifp);
   3501 	sc->sc_dying = true;
   3502 	wm_stop(ifp, 1);
   3503 	IFNET_UNLOCK(ifp);
   3504 
   3505 	pmf_device_deregister(self);
   3506 
   3507 	sysctl_teardown(&sc->sc_sysctllog);
   3508 
   3509 #ifdef WM_EVENT_COUNTERS
   3510 	evcnt_detach(&sc->sc_ev_linkintr);
   3511 
   3512 	evcnt_detach(&sc->sc_ev_crcerrs);
   3513 	evcnt_detach(&sc->sc_ev_symerrc);
   3514 	evcnt_detach(&sc->sc_ev_mpc);
   3515 	evcnt_detach(&sc->sc_ev_colc);
   3516 	evcnt_detach(&sc->sc_ev_sec);
   3517 	evcnt_detach(&sc->sc_ev_rlec);
   3518 
   3519 	if (sc->sc_type >= WM_T_82543) {
   3520 		evcnt_detach(&sc->sc_ev_algnerrc);
   3521 		evcnt_detach(&sc->sc_ev_rxerrc);
   3522 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3523 			evcnt_detach(&sc->sc_ev_cexterr);
   3524 		else
   3525 			evcnt_detach(&sc->sc_ev_htdpmc);
   3526 
   3527 		evcnt_detach(&sc->sc_ev_tncrs);
   3528 		evcnt_detach(&sc->sc_ev_tsctc);
   3529 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3530 			evcnt_detach(&sc->sc_ev_tsctfc);
   3531 		else {
   3532 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3533 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3534 		}
   3535 	}
   3536 
   3537 	if (sc->sc_type >= WM_T_82542_2_1) {
   3538 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3539 		evcnt_detach(&sc->sc_ev_tx_xon);
   3540 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3541 		evcnt_detach(&sc->sc_ev_rx_xon);
   3542 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3543 	}
   3544 
   3545 	evcnt_detach(&sc->sc_ev_scc);
   3546 	evcnt_detach(&sc->sc_ev_ecol);
   3547 	evcnt_detach(&sc->sc_ev_mcc);
   3548 	evcnt_detach(&sc->sc_ev_latecol);
   3549 
   3550 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3551 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3552 
   3553 	evcnt_detach(&sc->sc_ev_dc);
   3554 	evcnt_detach(&sc->sc_ev_prc64);
   3555 	evcnt_detach(&sc->sc_ev_prc127);
   3556 	evcnt_detach(&sc->sc_ev_prc255);
   3557 	evcnt_detach(&sc->sc_ev_prc511);
   3558 	evcnt_detach(&sc->sc_ev_prc1023);
   3559 	evcnt_detach(&sc->sc_ev_prc1522);
   3560 	evcnt_detach(&sc->sc_ev_gprc);
   3561 	evcnt_detach(&sc->sc_ev_bprc);
   3562 	evcnt_detach(&sc->sc_ev_mprc);
   3563 	evcnt_detach(&sc->sc_ev_gptc);
   3564 	evcnt_detach(&sc->sc_ev_gorc);
   3565 	evcnt_detach(&sc->sc_ev_gotc);
   3566 	evcnt_detach(&sc->sc_ev_rnbc);
   3567 	evcnt_detach(&sc->sc_ev_ruc);
   3568 	evcnt_detach(&sc->sc_ev_rfc);
   3569 	evcnt_detach(&sc->sc_ev_roc);
   3570 	evcnt_detach(&sc->sc_ev_rjc);
   3571 	if (sc->sc_type >= WM_T_82540) {
   3572 		evcnt_detach(&sc->sc_ev_mgtprc);
   3573 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3574 		evcnt_detach(&sc->sc_ev_mgtptc);
   3575 	}
   3576 	evcnt_detach(&sc->sc_ev_tor);
   3577 	evcnt_detach(&sc->sc_ev_tot);
   3578 	evcnt_detach(&sc->sc_ev_tpr);
   3579 	evcnt_detach(&sc->sc_ev_tpt);
   3580 	evcnt_detach(&sc->sc_ev_ptc64);
   3581 	evcnt_detach(&sc->sc_ev_ptc127);
   3582 	evcnt_detach(&sc->sc_ev_ptc255);
   3583 	evcnt_detach(&sc->sc_ev_ptc511);
   3584 	evcnt_detach(&sc->sc_ev_ptc1023);
   3585 	evcnt_detach(&sc->sc_ev_ptc1522);
   3586 	evcnt_detach(&sc->sc_ev_mptc);
   3587 	evcnt_detach(&sc->sc_ev_bptc);
   3588 	evcnt_detach(&sc->sc_ev_iac);
   3589 	if (sc->sc_type < WM_T_82575) {
   3590 		evcnt_detach(&sc->sc_ev_icrxptc);
   3591 		evcnt_detach(&sc->sc_ev_icrxatc);
   3592 		evcnt_detach(&sc->sc_ev_ictxptc);
   3593 		evcnt_detach(&sc->sc_ev_ictxatc);
   3594 		evcnt_detach(&sc->sc_ev_ictxqec);
   3595 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3596 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3597 		evcnt_detach(&sc->sc_ev_icrxoc);
   3598 	} else if (!WM_IS_ICHPCH(sc)) {
   3599 		evcnt_detach(&sc->sc_ev_rpthc);
   3600 		evcnt_detach(&sc->sc_ev_debug1);
   3601 		evcnt_detach(&sc->sc_ev_debug2);
   3602 		evcnt_detach(&sc->sc_ev_debug3);
   3603 		evcnt_detach(&sc->sc_ev_hgptc);
   3604 		evcnt_detach(&sc->sc_ev_debug4);
   3605 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3606 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3607 
   3608 		evcnt_detach(&sc->sc_ev_hgorc);
   3609 		evcnt_detach(&sc->sc_ev_hgotc);
   3610 		evcnt_detach(&sc->sc_ev_lenerrs);
   3611 	}
   3612 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3613 		evcnt_detach(&sc->sc_ev_tlpic);
   3614 		evcnt_detach(&sc->sc_ev_rlpic);
   3615 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3616 		evcnt_detach(&sc->sc_ev_o2bspc);
   3617 		evcnt_detach(&sc->sc_ev_b2ospc);
   3618 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3619 		evcnt_detach(&sc->sc_ev_scvpc);
   3620 		evcnt_detach(&sc->sc_ev_hrmpc);
   3621 	}
   3622 #endif /* WM_EVENT_COUNTERS */
   3623 
   3624 	rnd_detach_source(&sc->rnd_source);
   3625 
   3626 	/* Tell the firmware about the release */
   3627 	mutex_enter(sc->sc_core_lock);
   3628 	wm_release_manageability(sc);
   3629 	wm_release_hw_control(sc);
   3630 	wm_enable_wakeup(sc);
   3631 	mutex_exit(sc->sc_core_lock);
   3632 
   3633 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3634 
   3635 	ether_ifdetach(ifp);
   3636 	if_detach(ifp);
   3637 	if_percpuq_destroy(sc->sc_ipq);
   3638 
   3639 	/* Delete all remaining media. */
   3640 	ifmedia_fini(&sc->sc_mii.mii_media);
   3641 
   3642 	/* Unload RX dmamaps and free mbufs */
   3643 	for (i = 0; i < sc->sc_nqueues; i++) {
   3644 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3645 		mutex_enter(rxq->rxq_lock);
   3646 		wm_rxdrain(rxq);
   3647 		mutex_exit(rxq->rxq_lock);
   3648 	}
   3649 	/* Must unlock here */
   3650 
   3651 	/* Disestablish the interrupt handler */
   3652 	for (i = 0; i < sc->sc_nintrs; i++) {
   3653 		if (sc->sc_ihs[i] != NULL) {
   3654 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3655 			sc->sc_ihs[i] = NULL;
   3656 		}
   3657 	}
   3658 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3659 
   3660 	/* wm_stop() ensured that the workqueues are stopped. */
   3661 	workqueue_destroy(sc->sc_queue_wq);
   3662 	workqueue_destroy(sc->sc_reset_wq);
   3663 
   3664 	for (i = 0; i < sc->sc_nqueues; i++)
   3665 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3666 
   3667 	wm_free_txrx_queues(sc);
   3668 
   3669 	/* Unmap the registers */
   3670 	if (sc->sc_ss) {
   3671 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3672 		sc->sc_ss = 0;
   3673 	}
   3674 	if (sc->sc_ios) {
   3675 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3676 		sc->sc_ios = 0;
   3677 	}
   3678 	if (sc->sc_flashs) {
   3679 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3680 		sc->sc_flashs = 0;
   3681 	}
   3682 
   3683 	if (sc->sc_core_lock)
   3684 		mutex_obj_free(sc->sc_core_lock);
   3685 	if (sc->sc_ich_phymtx)
   3686 		mutex_obj_free(sc->sc_ich_phymtx);
   3687 	if (sc->sc_ich_nvmmtx)
   3688 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3689 
   3690 	return 0;
   3691 }
   3692 
   3693 static bool
   3694 wm_suspend(device_t self, const pmf_qual_t *qual)
   3695 {
   3696 	struct wm_softc *sc = device_private(self);
   3697 
   3698 	wm_release_manageability(sc);
   3699 	wm_release_hw_control(sc);
   3700 	wm_enable_wakeup(sc);
   3701 
   3702 	return true;
   3703 }
   3704 
   3705 static bool
   3706 wm_resume(device_t self, const pmf_qual_t *qual)
   3707 {
   3708 	struct wm_softc *sc = device_private(self);
   3709 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3710 	pcireg_t reg;
   3711 	char buf[256];
   3712 
   3713 	reg = CSR_READ(sc, WMREG_WUS);
   3714 	if (reg != 0) {
   3715 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3716 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3717 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3718 	}
   3719 
   3720 	if (sc->sc_type >= WM_T_PCH2)
   3721 		wm_resume_workarounds_pchlan(sc);
   3722 	IFNET_LOCK(ifp);
   3723 	if ((ifp->if_flags & IFF_UP) == 0) {
   3724 		/* >= PCH_SPT hardware workaround before reset. */
   3725 		if (sc->sc_type >= WM_T_PCH_SPT)
   3726 			wm_flush_desc_rings(sc);
   3727 
   3728 		wm_reset(sc);
   3729 		/* Non-AMT based hardware can now take control from firmware */
   3730 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3731 			wm_get_hw_control(sc);
   3732 		wm_init_manageability(sc);
   3733 	} else {
   3734 		/*
    3735 		 * We called pmf_class_network_register(), so if_init() is
    3736 		 * called automatically when the interface is IFF_UP. wm_reset(),
   3737 		 * wm_get_hw_control() and wm_init_manageability() are called
   3738 		 * via wm_init().
   3739 		 */
   3740 	}
   3741 	IFNET_UNLOCK(ifp);
   3742 
   3743 	return true;
   3744 }
   3745 
   3746 /*
   3747  * wm_watchdog:
   3748  *
    3749  *	Watchdog checker. Returns false if a Tx hang was detected
    3750  *	and a watchdog reset was scheduled.
   3750  */
   3751 static bool
   3752 wm_watchdog(struct ifnet *ifp)
   3753 {
   3754 	int qid;
   3755 	struct wm_softc *sc = ifp->if_softc;
    3756 	uint16_t hang_queue = 0; /* Bitmask of hung queues; wm(4) has at most 16 (82576). */
   3757 
   3758 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3759 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3760 
   3761 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3762 	}
   3763 
   3764 #ifdef WM_DEBUG
   3765 	if (sc->sc_trigger_reset) {
    3766 		/* Debug operation; no need for atomicity or reliability. */
   3767 		sc->sc_trigger_reset = 0;
   3768 		hang_queue++;
   3769 	}
   3770 #endif
   3771 
   3772 	if (hang_queue == 0)
   3773 		return true;
   3774 
   3775 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3776 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3777 
   3778 	return false;
   3779 }
   3780 
   3781 /*
   3782  * Perform an interface watchdog reset.
   3783  */
   3784 static void
   3785 wm_handle_reset_work(struct work *work, void *arg)
   3786 {
   3787 	struct wm_softc * const sc = arg;
   3788 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3789 
   3790 	/* Don't want ioctl operations to happen */
   3791 	IFNET_LOCK(ifp);
   3792 
    3793 	/* Reset the interface. */
   3794 	wm_init(ifp);
   3795 
   3796 	IFNET_UNLOCK(ifp);
   3797 
    3798 	/*
    3799 	 * Some upper-layer processing still calls ifp->if_start()
    3800 	 * directly (e.g. ALTQ or single-CPU systems), so try to get
    3801 	 * more packets going.
    3802 	 */
   3803 	ifp->if_start(ifp);
   3804 
   3805 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3806 }
   3807 
   3808 
   3809 static void
   3810 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3811 {
   3812 
   3813 	mutex_enter(txq->txq_lock);
   3814 	if (txq->txq_sending &&
   3815 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3816 		wm_watchdog_txq_locked(ifp, txq, hang);
   3817 
   3818 	mutex_exit(txq->txq_lock);
   3819 }
   3820 
   3821 static void
   3822 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3823     uint16_t *hang)
   3824 {
   3825 	struct wm_softc *sc = ifp->if_softc;
   3826 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3827 
   3828 	KASSERT(mutex_owned(txq->txq_lock));
   3829 
   3830 	/*
   3831 	 * Since we're using delayed interrupts, sweep up
   3832 	 * before we report an error.
   3833 	 */
   3834 	wm_txeof(txq, UINT_MAX);
   3835 
   3836 	if (txq->txq_sending)
   3837 		*hang |= __BIT(wmq->wmq_id);
   3838 
   3839 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3840 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3841 		    device_xname(sc->sc_dev));
   3842 	} else {
   3843 #ifdef WM_DEBUG
   3844 		int i, j;
   3845 		struct wm_txsoft *txs;
   3846 #endif
   3847 		log(LOG_ERR,
   3848 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3849 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3850 		    txq->txq_next);
   3851 		if_statinc(ifp, if_oerrors);
   3852 #ifdef WM_DEBUG
   3853 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3854 		     i = WM_NEXTTXS(txq, i)) {
   3855 			txs = &txq->txq_soft[i];
   3856 			printf("txs %d tx %d -> %d\n",
   3857 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3858 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3859 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3860 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3861 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3862 					printf("\t %#08x%08x\n",
   3863 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3864 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3865 				} else {
   3866 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3867 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3868 					    txq->txq_descs[j].wtx_addr.wa_low);
   3869 					printf("\t %#04x%02x%02x%08x\n",
   3870 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3871 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3872 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3873 					    txq->txq_descs[j].wtx_cmdlen);
   3874 				}
   3875 				if (j == txs->txs_lastdesc)
   3876 					break;
   3877 			}
   3878 		}
   3879 #endif
   3880 	}
   3881 }
   3882 
   3883 /*
   3884  * wm_tick:
   3885  *
   3886  *	One second timer, used to check link status, sweep up
   3887  *	completed transmit jobs, etc.
   3888  */
   3889 static void
   3890 wm_tick(void *arg)
   3891 {
   3892 	struct wm_softc *sc = arg;
   3893 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3894 
   3895 	mutex_enter(sc->sc_core_lock);
   3896 
   3897 	if (sc->sc_core_stopping) {
   3898 		mutex_exit(sc->sc_core_lock);
   3899 		return;
   3900 	}
   3901 
   3902 	wm_update_stats(sc);
   3903 
   3904 	if (sc->sc_flags & WM_F_HAS_MII)
   3905 		mii_tick(&sc->sc_mii);
   3906 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3907 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3908 		wm_serdes_tick(sc);
   3909 	else
   3910 		wm_tbi_tick(sc);
   3911 
   3912 	mutex_exit(sc->sc_core_lock);
   3913 
   3914 	if (wm_watchdog(ifp))
   3915 		callout_schedule(&sc->sc_tick_ch, hz);
   3916 }
   3917 
   3918 static int
   3919 wm_ifflags_cb(struct ethercom *ec)
   3920 {
   3921 	struct ifnet *ifp = &ec->ec_if;
   3922 	struct wm_softc *sc = ifp->if_softc;
   3923 	u_short iffchange;
   3924 	int ecchange;
   3925 	bool needreset = false;
   3926 	int rc = 0;
   3927 
   3928 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3929 		device_xname(sc->sc_dev), __func__));
   3930 
   3931 	KASSERT(IFNET_LOCKED(ifp));
   3932 
   3933 	mutex_enter(sc->sc_core_lock);
   3934 
   3935 	/*
   3936 	 * Check for if_flags.
   3937 	 * Main usage is to prevent linkdown when opening bpf.
   3938 	 */
   3939 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3940 	sc->sc_if_flags = ifp->if_flags;
   3941 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3942 		needreset = true;
   3943 		goto ec;
   3944 	}
   3945 
   3946 	/* iff related updates */
   3947 	if ((iffchange & IFF_PROMISC) != 0)
   3948 		wm_set_filter(sc);
   3949 
   3950 	wm_set_vlan(sc);
   3951 
   3952 ec:
   3953 	/* Check for ec_capenable. */
   3954 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3955 	sc->sc_ec_capenable = ec->ec_capenable;
   3956 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3957 		needreset = true;
   3958 		goto out;
   3959 	}
   3960 
   3961 	/* ec related updates */
   3962 	wm_set_eee(sc);
   3963 
   3964 out:
   3965 	if (needreset)
   3966 		rc = ENETRESET;
   3967 	mutex_exit(sc->sc_core_lock);
   3968 
   3969 	return rc;
   3970 }
   3971 
   3972 static bool
   3973 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3974 {
   3975 
   3976 	switch (sc->sc_phytype) {
   3977 	case WMPHY_82577: /* ihphy */
   3978 	case WMPHY_82578: /* atphy */
   3979 	case WMPHY_82579: /* ihphy */
   3980 	case WMPHY_I217: /* ihphy */
   3981 	case WMPHY_82580: /* ihphy */
   3982 	case WMPHY_I350: /* ihphy */
   3983 		return true;
   3984 	default:
   3985 		return false;
   3986 	}
   3987 }
   3988 
   3989 static void
   3990 wm_set_linkdown_discard(struct wm_softc *sc)
   3991 {
   3992 
   3993 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3994 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3995 
   3996 		mutex_enter(txq->txq_lock);
   3997 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3998 		mutex_exit(txq->txq_lock);
   3999 	}
   4000 }
   4001 
   4002 static void
   4003 wm_clear_linkdown_discard(struct wm_softc *sc)
   4004 {
   4005 
   4006 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4007 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4008 
   4009 		mutex_enter(txq->txq_lock);
   4010 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4011 		mutex_exit(txq->txq_lock);
   4012 	}
   4013 }
   4014 
   4015 /*
   4016  * wm_ioctl:		[ifnet interface function]
   4017  *
   4018  *	Handle control requests from the operator.
   4019  */
   4020 static int
   4021 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4022 {
   4023 	struct wm_softc *sc = ifp->if_softc;
   4024 	struct ifreq *ifr = (struct ifreq *)data;
   4025 	struct ifaddr *ifa = (struct ifaddr *)data;
   4026 	struct sockaddr_dl *sdl;
   4027 	int error;
   4028 
   4029 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4030 		device_xname(sc->sc_dev), __func__));
   4031 
   4032 	switch (cmd) {
   4033 	case SIOCADDMULTI:
   4034 	case SIOCDELMULTI:
   4035 		break;
   4036 	default:
   4037 		KASSERT(IFNET_LOCKED(ifp));
   4038 	}
   4039 
   4040 	if (cmd == SIOCZIFDATA) {
   4041 		/*
   4042 		 * Special handling for SIOCZIFDATA.
   4043 		 * Copying and clearing the if_data structure is done with
   4044 		 * ether_ioctl() below.
   4045 		 */
   4046 		mutex_enter(sc->sc_core_lock);
   4047 		wm_update_stats(sc);
   4048 		wm_clear_evcnt(sc);
   4049 		mutex_exit(sc->sc_core_lock);
   4050 	}
   4051 
   4052 	switch (cmd) {
   4053 	case SIOCSIFMEDIA:
   4054 		mutex_enter(sc->sc_core_lock);
   4055 		/* Flow control requires full-duplex mode. */
   4056 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4057 		    (ifr->ifr_media & IFM_FDX) == 0)
   4058 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4059 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4060 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4061 				/* We can do both TXPAUSE and RXPAUSE. */
   4062 				ifr->ifr_media |=
   4063 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4064 			}
   4065 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4066 		}
   4067 		mutex_exit(sc->sc_core_lock);
   4068 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4069 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4070 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4071 				DPRINTF(sc, WM_DEBUG_LINK,
   4072 				    ("%s: %s: Set linkdown discard flag\n",
   4073 					device_xname(sc->sc_dev), __func__));
   4074 				wm_set_linkdown_discard(sc);
   4075 			}
   4076 		}
   4077 		break;
   4078 	case SIOCINITIFADDR:
   4079 		mutex_enter(sc->sc_core_lock);
   4080 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4081 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4082 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4083 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4084 			/* Unicast address is the first multicast entry */
   4085 			wm_set_filter(sc);
   4086 			error = 0;
   4087 			mutex_exit(sc->sc_core_lock);
   4088 			break;
   4089 		}
   4090 		mutex_exit(sc->sc_core_lock);
   4091 		/*FALLTHROUGH*/
   4092 	default:
   4093 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4094 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4095 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4096 				DPRINTF(sc, WM_DEBUG_LINK,
   4097 				    ("%s: %s: Set linkdown discard flag\n",
   4098 					device_xname(sc->sc_dev), __func__));
   4099 				wm_set_linkdown_discard(sc);
   4100 			}
   4101 		}
   4102 		const int s = splnet();
    4103 		/* It may call wm_start(), so call it without holding sc_core_lock */
   4104 		error = ether_ioctl(ifp, cmd, data);
   4105 		splx(s);
   4106 		if (error != ENETRESET)
   4107 			break;
   4108 
   4109 		error = 0;
   4110 
   4111 		if (cmd == SIOCSIFCAP)
   4112 			error = if_init(ifp);
   4113 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4114 			mutex_enter(sc->sc_core_lock);
   4115 			if (sc->sc_if_flags & IFF_RUNNING) {
   4116 				/*
   4117 				 * Multicast list has changed; set the
   4118 				 * hardware filter accordingly.
   4119 				 */
   4120 				wm_set_filter(sc);
   4121 			}
   4122 			mutex_exit(sc->sc_core_lock);
   4123 		}
   4124 		break;
   4125 	}
   4126 
   4127 	return error;
   4128 }
   4129 
   4130 /* MAC address related */
   4131 
   4132 /*
    4133  * Get the offset of the MAC address and return it.
    4134  * If an error occurs, use offset 0.
   4135  */
   4136 static uint16_t
   4137 wm_check_alt_mac_addr(struct wm_softc *sc)
   4138 {
   4139 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4140 	uint16_t offset = NVM_OFF_MACADDR;
   4141 
   4142 	/* Try to read alternative MAC address pointer */
   4143 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4144 		return 0;
   4145 
    4146 	/* Check whether the pointer is valid. */
   4147 	if ((offset == 0x0000) || (offset == 0xffff))
   4148 		return 0;
   4149 
   4150 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4151 	/*
    4152 	 * Check whether the alternative MAC address is valid.
    4153 	 * Some cards have a non-0xffff pointer but don't actually use
    4154 	 * an alternative MAC address.
    4155 	 *
    4156 	 * A valid unicast address must have the multicast bit clear.
   4157 	 */
   4158 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4159 		if (((myea[0] & 0xff) & 0x01) == 0)
   4160 			return offset; /* Found */
   4161 
   4162 	/* Not found */
   4163 	return 0;
   4164 }
   4165 
   4166 static int
   4167 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4168 {
   4169 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4170 	uint16_t offset = NVM_OFF_MACADDR;
   4171 	int do_invert = 0;
   4172 
   4173 	switch (sc->sc_type) {
   4174 	case WM_T_82580:
   4175 	case WM_T_I350:
   4176 	case WM_T_I354:
   4177 		/* EEPROM Top Level Partitioning */
   4178 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4179 		break;
   4180 	case WM_T_82571:
   4181 	case WM_T_82575:
   4182 	case WM_T_82576:
   4183 	case WM_T_80003:
   4184 	case WM_T_I210:
   4185 	case WM_T_I211:
   4186 		offset = wm_check_alt_mac_addr(sc);
   4187 		if (offset == 0)
   4188 			if ((sc->sc_funcid & 0x01) == 1)
   4189 				do_invert = 1;
   4190 		break;
   4191 	default:
   4192 		if ((sc->sc_funcid & 0x01) == 1)
   4193 			do_invert = 1;
   4194 		break;
   4195 	}
   4196 
   4197 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4198 		goto bad;
   4199 
   4200 	enaddr[0] = myea[0] & 0xff;
   4201 	enaddr[1] = myea[0] >> 8;
   4202 	enaddr[2] = myea[1] & 0xff;
   4203 	enaddr[3] = myea[1] >> 8;
   4204 	enaddr[4] = myea[2] & 0xff;
   4205 	enaddr[5] = myea[2] >> 8;
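         	/*
         	 * For illustration: the NVM stores the address as three
         	 * little-endian 16-bit words, so 00:11:22:33:44:55 is read as
         	 * myea[] = { 0x1100, 0x3322, 0x5544 } and unpacked as above.
         	 */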
   4206 
   4207 	/*
   4208 	 * Toggle the LSB of the MAC address on the second port
   4209 	 * of some dual port cards.
   4210 	 */
   4211 	if (do_invert != 0)
   4212 		enaddr[5] ^= 1;
   4213 
   4214 	return 0;
   4215 
   4216 bad:
   4217 	return -1;
   4218 }
   4219 
   4220 /*
   4221  * wm_set_ral:
   4222  *
    4223  *	Set an entry in the receive address list.
   4224  */
   4225 static void
   4226 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4227 {
   4228 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4229 	uint32_t wlock_mac;
   4230 	int rv;
   4231 
   4232 	if (enaddr != NULL) {
   4233 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4234 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4235 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4236 		ral_hi |= RAL_AV;
   4237 	} else {
   4238 		ral_lo = 0;
   4239 		ral_hi = 0;
   4240 	}
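         	/*
         	 * Example: for 00:11:22:33:44:55 the packing above yields
         	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
         	 */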
   4241 
   4242 	switch (sc->sc_type) {
   4243 	case WM_T_82542_2_0:
   4244 	case WM_T_82542_2_1:
   4245 	case WM_T_82543:
   4246 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4247 		CSR_WRITE_FLUSH(sc);
   4248 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4249 		CSR_WRITE_FLUSH(sc);
   4250 		break;
   4251 	case WM_T_PCH2:
   4252 	case WM_T_PCH_LPT:
   4253 	case WM_T_PCH_SPT:
   4254 	case WM_T_PCH_CNP:
   4255 		if (idx == 0) {
   4256 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4257 			CSR_WRITE_FLUSH(sc);
   4258 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4259 			CSR_WRITE_FLUSH(sc);
   4260 			return;
   4261 		}
   4262 		if (sc->sc_type != WM_T_PCH2) {
   4263 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4264 			    FWSM_WLOCK_MAC);
   4265 			addrl = WMREG_SHRAL(idx - 1);
   4266 			addrh = WMREG_SHRAH(idx - 1);
   4267 		} else {
   4268 			wlock_mac = 0;
   4269 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4270 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4271 		}
   4272 
   4273 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4274 			rv = wm_get_swflag_ich8lan(sc);
   4275 			if (rv != 0)
   4276 				return;
   4277 			CSR_WRITE(sc, addrl, ral_lo);
   4278 			CSR_WRITE_FLUSH(sc);
   4279 			CSR_WRITE(sc, addrh, ral_hi);
   4280 			CSR_WRITE_FLUSH(sc);
   4281 			wm_put_swflag_ich8lan(sc);
   4282 		}
   4283 
   4284 		break;
   4285 	default:
   4286 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4287 		CSR_WRITE_FLUSH(sc);
   4288 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4289 		CSR_WRITE_FLUSH(sc);
   4290 		break;
   4291 	}
   4292 }
   4293 
   4294 /*
   4295  * wm_mchash:
   4296  *
    4297  *	Compute the hash of the multicast address for the 4096-bit
    4298  *	multicast filter (1024-bit on ICH/PCH variants).
   4299  */
   4300 static uint32_t
   4301 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4302 {
   4303 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4304 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4305 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4306 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4307 	uint32_t hash;
   4308 
   4309 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4310 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4311 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4312 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4313 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4314 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4315 		return (hash & 0x3ff);
   4316 	}
   4317 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4318 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4319 
   4320 	return (hash & 0xfff);
   4321 }
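         /*
          * Example: with sc_mchash_type 0 on non-ICH parts, an address whose
          * last two bytes are 0x12, 0x34 hashes to
          * (0x12 >> 4) | (0x34 << 4) = 0x341.
          */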
   4322 
    4323 /*
    4324  * wm_rar_count:
    4325  *	Return the number of entries in the receive address (RAR) table.
    4326  */
   4327 static int
   4328 wm_rar_count(struct wm_softc *sc)
   4329 {
   4330 	int size;
   4331 
   4332 	switch (sc->sc_type) {
   4333 	case WM_T_ICH8:
    4334 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4335 		break;
   4336 	case WM_T_ICH9:
   4337 	case WM_T_ICH10:
   4338 	case WM_T_PCH:
   4339 		size = WM_RAL_TABSIZE_ICH8;
   4340 		break;
   4341 	case WM_T_PCH2:
   4342 		size = WM_RAL_TABSIZE_PCH2;
   4343 		break;
   4344 	case WM_T_PCH_LPT:
   4345 	case WM_T_PCH_SPT:
   4346 	case WM_T_PCH_CNP:
   4347 		size = WM_RAL_TABSIZE_PCH_LPT;
   4348 		break;
   4349 	case WM_T_82575:
   4350 	case WM_T_I210:
   4351 	case WM_T_I211:
   4352 		size = WM_RAL_TABSIZE_82575;
   4353 		break;
   4354 	case WM_T_82576:
   4355 	case WM_T_82580:
   4356 		size = WM_RAL_TABSIZE_82576;
   4357 		break;
   4358 	case WM_T_I350:
   4359 	case WM_T_I354:
   4360 		size = WM_RAL_TABSIZE_I350;
   4361 		break;
   4362 	default:
   4363 		size = WM_RAL_TABSIZE;
   4364 	}
   4365 
   4366 	return size;
   4367 }
   4368 
   4369 /*
   4370  * wm_set_filter:
   4371  *
   4372  *	Set up the receive filter.
   4373  */
   4374 static void
   4375 wm_set_filter(struct wm_softc *sc)
   4376 {
   4377 	struct ethercom *ec = &sc->sc_ethercom;
   4378 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4379 	struct ether_multi *enm;
   4380 	struct ether_multistep step;
   4381 	bus_addr_t mta_reg;
   4382 	uint32_t hash, reg, bit;
   4383 	int i, size, ralmax, rv;
   4384 
   4385 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4386 		device_xname(sc->sc_dev), __func__));
   4387 	KASSERT(mutex_owned(sc->sc_core_lock));
   4388 
   4389 	if (sc->sc_type >= WM_T_82544)
   4390 		mta_reg = WMREG_CORDOVA_MTA;
   4391 	else
   4392 		mta_reg = WMREG_MTA;
   4393 
   4394 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4395 
   4396 	if (sc->sc_if_flags & IFF_BROADCAST)
   4397 		sc->sc_rctl |= RCTL_BAM;
   4398 	if (sc->sc_if_flags & IFF_PROMISC) {
   4399 		sc->sc_rctl |= RCTL_UPE;
   4400 		ETHER_LOCK(ec);
   4401 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4402 		ETHER_UNLOCK(ec);
   4403 		goto allmulti;
   4404 	}
   4405 
   4406 	/*
   4407 	 * Set the station address in the first RAL slot, and
   4408 	 * clear the remaining slots.
   4409 	 */
   4410 	size = wm_rar_count(sc);
   4411 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4412 
   4413 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4414 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4415 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4416 		switch (i) {
   4417 		case 0:
   4418 			/* We can use all entries */
   4419 			ralmax = size;
   4420 			break;
   4421 		case 1:
   4422 			/* Only RAR[0] */
   4423 			ralmax = 1;
   4424 			break;
   4425 		default:
   4426 			/* Available SHRA + RAR[0] */
   4427 			ralmax = i + 1;
   4428 		}
   4429 	} else
   4430 		ralmax = size;
   4431 	for (i = 1; i < size; i++) {
   4432 		if (i < ralmax)
   4433 			wm_set_ral(sc, NULL, i);
   4434 	}
   4435 
   4436 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4437 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4438 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4439 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4440 		size = WM_ICH8_MC_TABSIZE;
   4441 	else
   4442 		size = WM_MC_TABSIZE;
   4443 	/* Clear out the multicast table. */
   4444 	for (i = 0; i < size; i++) {
   4445 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4446 		CSR_WRITE_FLUSH(sc);
   4447 	}
   4448 
   4449 	ETHER_LOCK(ec);
   4450 	ETHER_FIRST_MULTI(step, ec, enm);
   4451 	while (enm != NULL) {
   4452 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4453 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4454 			ETHER_UNLOCK(ec);
   4455 			/*
   4456 			 * We must listen to a range of multicast addresses.
   4457 			 * For now, just accept all multicasts, rather than
   4458 			 * trying to set only those filter bits needed to match
   4459 			 * the range.  (At this time, the only use of address
   4460 			 * ranges is for IP multicast routing, for which the
   4461 			 * range is big enough to require all bits set.)
   4462 			 */
   4463 			goto allmulti;
   4464 		}
   4465 
   4466 		hash = wm_mchash(sc, enm->enm_addrlo);
   4467 
   4468 		reg = (hash >> 5);
   4469 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4470 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4471 		    || (sc->sc_type == WM_T_PCH2)
   4472 		    || (sc->sc_type == WM_T_PCH_LPT)
   4473 		    || (sc->sc_type == WM_T_PCH_SPT)
   4474 		    || (sc->sc_type == WM_T_PCH_CNP))
   4475 			reg &= 0x1f;
   4476 		else
   4477 			reg &= 0x7f;
   4478 		bit = hash & 0x1f;
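         		/*
         		 * The table is an array of 32-bit MTA registers: the
         		 * upper hash bits select the register and the low 5
         		 * bits the bit within it, e.g. hash 0x341 -> reg 0x1a,
         		 * bit 1.
         		 */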
   4479 
   4480 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4481 		hash |= 1U << bit;
   4482 
   4483 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4484 			/*
    4485 			 * 82544 Errata 9: Certain registers cannot be written
   4486 			 * with particular alignments in PCI-X bus operation
   4487 			 * (FCAH, MTA and VFTA).
   4488 			 */
   4489 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4490 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4491 			CSR_WRITE_FLUSH(sc);
   4492 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4493 			CSR_WRITE_FLUSH(sc);
   4494 		} else {
   4495 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4496 			CSR_WRITE_FLUSH(sc);
   4497 		}
   4498 
   4499 		ETHER_NEXT_MULTI(step, enm);
   4500 	}
   4501 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4502 	ETHER_UNLOCK(ec);
   4503 
   4504 	goto setit;
   4505 
   4506 allmulti:
   4507 	sc->sc_rctl |= RCTL_MPE;
   4508 
   4509 setit:
   4510 	if (sc->sc_type >= WM_T_PCH2) {
   4511 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4512 		    && (ifp->if_mtu > ETHERMTU))
   4513 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4514 		else
   4515 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4516 		if (rv != 0)
   4517 			device_printf(sc->sc_dev,
   4518 			    "Failed to do workaround for jumbo frame.\n");
   4519 	}
   4520 
   4521 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4522 }
   4523 
   4524 /* Reset and init related */
   4525 
   4526 static void
   4527 wm_set_vlan(struct wm_softc *sc)
   4528 {
   4529 
   4530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4531 		device_xname(sc->sc_dev), __func__));
   4532 
   4533 	/* Deal with VLAN enables. */
   4534 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4535 		sc->sc_ctrl |= CTRL_VME;
   4536 	else
   4537 		sc->sc_ctrl &= ~CTRL_VME;
   4538 
   4539 	/* Write the control registers. */
   4540 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4541 }
   4542 
   4543 static void
   4544 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4545 {
   4546 	uint32_t gcr;
   4547 	pcireg_t ctrl2;
   4548 
   4549 	gcr = CSR_READ(sc, WMREG_GCR);
   4550 
    4551 	/* Only take action if the timeout value is at its default of 0 */
   4552 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4553 		goto out;
   4554 
   4555 	if ((gcr & GCR_CAP_VER2) == 0) {
   4556 		gcr |= GCR_CMPL_TMOUT_10MS;
   4557 		goto out;
   4558 	}
   4559 
   4560 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4561 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4562 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4563 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4564 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4565 
   4566 out:
   4567 	/* Disable completion timeout resend */
   4568 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4569 
   4570 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4571 }
   4572 
   4573 void
   4574 wm_get_auto_rd_done(struct wm_softc *sc)
   4575 {
   4576 	int i;
   4577 
    4578 	/* Wait for eeprom to reload */
   4579 	switch (sc->sc_type) {
   4580 	case WM_T_82571:
   4581 	case WM_T_82572:
   4582 	case WM_T_82573:
   4583 	case WM_T_82574:
   4584 	case WM_T_82583:
   4585 	case WM_T_82575:
   4586 	case WM_T_82576:
   4587 	case WM_T_82580:
   4588 	case WM_T_I350:
   4589 	case WM_T_I354:
   4590 	case WM_T_I210:
   4591 	case WM_T_I211:
   4592 	case WM_T_80003:
   4593 	case WM_T_ICH8:
   4594 	case WM_T_ICH9:
   4595 		for (i = 0; i < 10; i++) {
   4596 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4597 				break;
   4598 			delay(1000);
   4599 		}
   4600 		if (i == 10) {
   4601 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4602 			    "complete\n", device_xname(sc->sc_dev));
   4603 		}
   4604 		break;
   4605 	default:
   4606 		break;
   4607 	}
   4608 }
   4609 
   4610 void
   4611 wm_lan_init_done(struct wm_softc *sc)
   4612 {
   4613 	uint32_t reg = 0;
   4614 	int i;
   4615 
   4616 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4617 		device_xname(sc->sc_dev), __func__));
   4618 
   4619 	/* Wait for eeprom to reload */
   4620 	switch (sc->sc_type) {
   4621 	case WM_T_ICH10:
   4622 	case WM_T_PCH:
   4623 	case WM_T_PCH2:
   4624 	case WM_T_PCH_LPT:
   4625 	case WM_T_PCH_SPT:
   4626 	case WM_T_PCH_CNP:
   4627 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4628 			reg = CSR_READ(sc, WMREG_STATUS);
   4629 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4630 				break;
   4631 			delay(100);
   4632 		}
   4633 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4634 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4635 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4636 		}
   4637 		break;
   4638 	default:
   4639 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4640 		    __func__);
   4641 		break;
   4642 	}
   4643 
   4644 	reg &= ~STATUS_LAN_INIT_DONE;
   4645 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4646 }
   4647 
   4648 void
   4649 wm_get_cfg_done(struct wm_softc *sc)
   4650 {
   4651 	int mask;
   4652 	uint32_t reg;
   4653 	int i;
   4654 
   4655 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4656 		device_xname(sc->sc_dev), __func__));
   4657 
   4658 	/* Wait for eeprom to reload */
   4659 	switch (sc->sc_type) {
   4660 	case WM_T_82542_2_0:
   4661 	case WM_T_82542_2_1:
   4662 		/* null */
   4663 		break;
   4664 	case WM_T_82543:
   4665 	case WM_T_82544:
   4666 	case WM_T_82540:
   4667 	case WM_T_82545:
   4668 	case WM_T_82545_3:
   4669 	case WM_T_82546:
   4670 	case WM_T_82546_3:
   4671 	case WM_T_82541:
   4672 	case WM_T_82541_2:
   4673 	case WM_T_82547:
   4674 	case WM_T_82547_2:
   4675 	case WM_T_82573:
   4676 	case WM_T_82574:
   4677 	case WM_T_82583:
   4678 		/* generic */
   4679 		delay(10*1000);
   4680 		break;
   4681 	case WM_T_80003:
   4682 	case WM_T_82571:
   4683 	case WM_T_82572:
   4684 	case WM_T_82575:
   4685 	case WM_T_82576:
   4686 	case WM_T_82580:
   4687 	case WM_T_I350:
   4688 	case WM_T_I354:
   4689 	case WM_T_I210:
   4690 	case WM_T_I211:
   4691 		if (sc->sc_type == WM_T_82571) {
   4692 			/* Only 82571 shares port 0 */
   4693 			mask = EEMNGCTL_CFGDONE_0;
   4694 		} else
   4695 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4696 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4697 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4698 				break;
   4699 			delay(1000);
   4700 		}
   4701 		if (i >= WM_PHY_CFG_TIMEOUT)
   4702 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4703 				device_xname(sc->sc_dev), __func__));
   4704 		break;
   4705 	case WM_T_ICH8:
   4706 	case WM_T_ICH9:
   4707 	case WM_T_ICH10:
   4708 	case WM_T_PCH:
   4709 	case WM_T_PCH2:
   4710 	case WM_T_PCH_LPT:
   4711 	case WM_T_PCH_SPT:
   4712 	case WM_T_PCH_CNP:
   4713 		delay(10*1000);
   4714 		if (sc->sc_type >= WM_T_ICH10)
   4715 			wm_lan_init_done(sc);
   4716 		else
   4717 			wm_get_auto_rd_done(sc);
   4718 
   4719 		/* Clear PHY Reset Asserted bit */
   4720 		reg = CSR_READ(sc, WMREG_STATUS);
   4721 		if ((reg & STATUS_PHYRA) != 0)
   4722 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4723 		break;
   4724 	default:
   4725 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4726 		    __func__);
   4727 		break;
   4728 	}
   4729 }
   4730 
   4731 int
   4732 wm_phy_post_reset(struct wm_softc *sc)
   4733 {
   4734 	device_t dev = sc->sc_dev;
   4735 	uint16_t reg;
   4736 	int rv = 0;
   4737 
   4738 	/* This function is only for ICH8 and newer. */
   4739 	if (sc->sc_type < WM_T_ICH8)
   4740 		return 0;
   4741 
   4742 	if (wm_phy_resetisblocked(sc)) {
   4743 		/* XXX */
   4744 		device_printf(dev, "PHY is blocked\n");
   4745 		return -1;
   4746 	}
   4747 
   4748 	/* Allow time for h/w to get to quiescent state after reset */
   4749 	delay(10*1000);
   4750 
   4751 	/* Perform any necessary post-reset workarounds */
   4752 	if (sc->sc_type == WM_T_PCH)
   4753 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4754 	else if (sc->sc_type == WM_T_PCH2)
   4755 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4756 	if (rv != 0)
   4757 		return rv;
   4758 
   4759 	/* Clear the host wakeup bit after lcd reset */
   4760 	if (sc->sc_type >= WM_T_PCH) {
   4761 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4762 		reg &= ~BM_WUC_HOST_WU_BIT;
   4763 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4764 	}
   4765 
   4766 	/* Configure the LCD with the extended configuration region in NVM */
   4767 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4768 		return rv;
   4769 
   4770 	/* Configure the LCD with the OEM bits in NVM */
   4771 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4772 
   4773 	if (sc->sc_type == WM_T_PCH2) {
   4774 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4775 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4776 			delay(10 * 1000);
   4777 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4778 		}
   4779 		/* Set EEE LPI Update Timer to 200usec */
   4780 		rv = sc->phy.acquire(sc);
   4781 		if (rv)
   4782 			return rv;
   4783 		rv = wm_write_emi_reg_locked(dev,
   4784 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4785 		sc->phy.release(sc);
   4786 	}
   4787 
   4788 	return rv;
   4789 }
   4790 
   4791 /* Only for PCH and newer */
   4792 static int
   4793 wm_write_smbus_addr(struct wm_softc *sc)
   4794 {
   4795 	uint32_t strap, freq;
   4796 	uint16_t phy_data;
   4797 	int rv;
   4798 
   4799 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4800 		device_xname(sc->sc_dev), __func__));
   4801 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4802 
   4803 	strap = CSR_READ(sc, WMREG_STRAP);
   4804 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4805 
   4806 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4807 	if (rv != 0)
   4808 		return rv;
   4809 
   4810 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4811 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4812 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4813 
   4814 	if (sc->sc_phytype == WMPHY_I217) {
   4815 		/* Restore SMBus frequency */
    4816 		if (freq--) {
   4817 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4818 			    | HV_SMB_ADDR_FREQ_HIGH);
   4819 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4820 			    HV_SMB_ADDR_FREQ_LOW);
   4821 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4822 			    HV_SMB_ADDR_FREQ_HIGH);
   4823 		} else
   4824 			DPRINTF(sc, WM_DEBUG_INIT,
   4825 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4826 				device_xname(sc->sc_dev), __func__));
   4827 	}
   4828 
   4829 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4830 	    phy_data);
   4831 }
   4832 
   4833 static int
   4834 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4835 {
   4836 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4837 	uint16_t phy_page = 0;
   4838 	int rv = 0;
   4839 
   4840 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4841 		device_xname(sc->sc_dev), __func__));
   4842 
   4843 	switch (sc->sc_type) {
   4844 	case WM_T_ICH8:
   4845 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4846 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4847 			return 0;
   4848 
   4849 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4850 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4851 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4852 			break;
   4853 		}
   4854 		/* FALLTHROUGH */
   4855 	case WM_T_PCH:
   4856 	case WM_T_PCH2:
   4857 	case WM_T_PCH_LPT:
   4858 	case WM_T_PCH_SPT:
   4859 	case WM_T_PCH_CNP:
   4860 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4861 		break;
   4862 	default:
   4863 		return 0;
   4864 	}
   4865 
   4866 	if ((rv = sc->phy.acquire(sc)) != 0)
   4867 		return rv;
   4868 
   4869 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4870 	if ((reg & sw_cfg_mask) == 0)
   4871 		goto release;
   4872 
   4873 	/*
   4874 	 * Make sure HW does not configure LCD from PHY extended configuration
   4875 	 * before SW configuration
   4876 	 */
   4877 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4878 	if ((sc->sc_type < WM_T_PCH2)
   4879 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4880 		goto release;
   4881 
   4882 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4883 		device_xname(sc->sc_dev), __func__));
   4884 	/* word_addr is in DWORD */
   4885 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4886 
   4887 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4888 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4889 	if (cnf_size == 0)
   4890 		goto release;
   4891 
   4892 	if (((sc->sc_type == WM_T_PCH)
   4893 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4894 	    || (sc->sc_type > WM_T_PCH)) {
   4895 		/*
   4896 		 * HW configures the SMBus address and LEDs when the OEM and
   4897 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4898 		 * are cleared, SW will configure them instead.
   4899 		 */
   4900 		DPRINTF(sc, WM_DEBUG_INIT,
   4901 		    ("%s: %s: Configure SMBus and LED\n",
   4902 			device_xname(sc->sc_dev), __func__));
   4903 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4904 			goto release;
   4905 
   4906 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4907 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4908 		    (uint16_t)reg);
   4909 		if (rv != 0)
   4910 			goto release;
   4911 	}
   4912 
   4913 	/* Configure LCD from extended configuration region. */
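         	/*
         	 * Each record is a pair of NVM words: the data word first,
         	 * then the register address; an IGPHY_PAGE_SELECT record
         	 * switches the PHY page used for the following writes.
         	 */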
   4914 	for (i = 0; i < cnf_size; i++) {
   4915 		uint16_t reg_data, reg_addr;
   4916 
   4917 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4918 			goto release;
   4919 
   4920 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4921 			goto release;
   4922 
   4923 		if (reg_addr == IGPHY_PAGE_SELECT)
   4924 			phy_page = reg_data;
   4925 
   4926 		reg_addr &= IGPHY_MAXREGADDR;
   4927 		reg_addr |= phy_page;
   4928 
   4929 		KASSERT(sc->phy.writereg_locked != NULL);
   4930 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4931 		    reg_data);
   4932 	}
   4933 
   4934 release:
   4935 	sc->phy.release(sc);
   4936 	return rv;
   4937 }
   4938 
   4939 /*
   4940  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4941  *  @sc:       pointer to the HW structure
   4942  *  @d0_state: boolean if entering d0 or d3 device state
   4943  *
   4944  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4945  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4946  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4947  */
   4948 int
   4949 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4950 {
   4951 	uint32_t mac_reg;
   4952 	uint16_t oem_reg;
   4953 	int rv;
   4954 
   4955 	if (sc->sc_type < WM_T_PCH)
   4956 		return 0;
   4957 
   4958 	rv = sc->phy.acquire(sc);
   4959 	if (rv != 0)
   4960 		return rv;
   4961 
   4962 	if (sc->sc_type == WM_T_PCH) {
   4963 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4964 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4965 			goto release;
   4966 	}
   4967 
   4968 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4969 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4970 		goto release;
   4971 
   4972 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4973 
   4974 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4975 	if (rv != 0)
   4976 		goto release;
   4977 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4978 
   4979 	if (d0_state) {
   4980 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4981 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4982 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4983 			oem_reg |= HV_OEM_BITS_LPLU;
   4984 	} else {
   4985 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4986 		    != 0)
   4987 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4988 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4989 		    != 0)
   4990 			oem_reg |= HV_OEM_BITS_LPLU;
   4991 	}
   4992 
   4993 	/* Set Restart auto-neg to activate the bits */
   4994 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4995 	    && (wm_phy_resetisblocked(sc) == false))
   4996 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4997 
   4998 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4999 
   5000 release:
   5001 	sc->phy.release(sc);
   5002 
   5003 	return rv;
   5004 }
   5005 
   5006 /* Init hardware bits */
   5007 void
   5008 wm_initialize_hardware_bits(struct wm_softc *sc)
   5009 {
   5010 	uint32_t tarc0, tarc1, reg;
   5011 
   5012 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5013 		device_xname(sc->sc_dev), __func__));
   5014 
   5015 	/* For 82571 variant, 80003 and ICHs */
   5016 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5017 	    || WM_IS_ICHPCH(sc)) {
   5018 
   5019 		/* Transmit Descriptor Control 0 */
   5020 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5021 		reg |= TXDCTL_COUNT_DESC;
   5022 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5023 
   5024 		/* Transmit Descriptor Control 1 */
   5025 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5026 		reg |= TXDCTL_COUNT_DESC;
   5027 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5028 
   5029 		/* TARC0 */
   5030 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5031 		switch (sc->sc_type) {
   5032 		case WM_T_82571:
   5033 		case WM_T_82572:
   5034 		case WM_T_82573:
   5035 		case WM_T_82574:
   5036 		case WM_T_82583:
   5037 		case WM_T_80003:
   5038 			/* Clear bits 30..27 */
   5039 			tarc0 &= ~__BITS(30, 27);
   5040 			break;
   5041 		default:
   5042 			break;
   5043 		}
   5044 
   5045 		switch (sc->sc_type) {
   5046 		case WM_T_82571:
   5047 		case WM_T_82572:
   5048 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5049 
   5050 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5051 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5052 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5053 			/* 8257[12] Errata No.7 */
    5054 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5055 
   5056 			/* TARC1 bit 28 */
   5057 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5058 				tarc1 &= ~__BIT(28);
   5059 			else
   5060 				tarc1 |= __BIT(28);
   5061 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5062 
   5063 			/*
   5064 			 * 8257[12] Errata No.13
    5065 			 * Disable Dynamic Clock Gating.
   5066 			 */
   5067 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5068 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5070 			break;
   5071 		case WM_T_82573:
   5072 		case WM_T_82574:
   5073 		case WM_T_82583:
   5074 			if ((sc->sc_type == WM_T_82574)
   5075 			    || (sc->sc_type == WM_T_82583))
   5076 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5077 
   5078 			/* Extended Device Control */
   5079 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5080 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5081 			reg |= __BIT(22);	/* Set bit 22 */
   5082 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5083 
   5084 			/* Device Control */
   5085 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5086 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5087 
   5088 			/* PCIe Control Register */
   5089 			/*
   5090 			 * 82573 Errata (unknown).
   5091 			 *
   5092 			 * 82574 Errata 25 and 82583 Errata 12
   5093 			 * "Dropped Rx Packets":
    5094 			 *   NVM image version 2.1.4 and newer don't have this bug.
   5095 			 */
   5096 			reg = CSR_READ(sc, WMREG_GCR);
   5097 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5098 			CSR_WRITE(sc, WMREG_GCR, reg);
   5099 
   5100 			if ((sc->sc_type == WM_T_82574)
   5101 			    || (sc->sc_type == WM_T_82583)) {
   5102 				/*
   5103 				 * Document says this bit must be set for
   5104 				 * proper operation.
   5105 				 */
   5106 				reg = CSR_READ(sc, WMREG_GCR);
   5107 				reg |= __BIT(22);
   5108 				CSR_WRITE(sc, WMREG_GCR, reg);
   5109 
   5110 				/*
    5111 				 * Apply a workaround for a hardware erratum
    5112 				 * documented in the errata docs. It fixes an
    5113 				 * issue where some error-prone or unreliable
    5114 				 * PCIe completions occur, particularly with
    5115 				 * ASPM enabled. Without the fix, the issue
    5116 				 * can cause Tx timeouts.
   5117 				 */
   5118 				reg = CSR_READ(sc, WMREG_GCR2);
   5119 				reg |= __BIT(0);
   5120 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5121 			}
   5122 			break;
   5123 		case WM_T_80003:
   5124 			/* TARC0 */
   5125 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5126 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5127 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5128 
   5129 			/* TARC1 bit 28 */
   5130 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5131 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5132 				tarc1 &= ~__BIT(28);
   5133 			else
   5134 				tarc1 |= __BIT(28);
   5135 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5136 			break;
   5137 		case WM_T_ICH8:
   5138 		case WM_T_ICH9:
   5139 		case WM_T_ICH10:
   5140 		case WM_T_PCH:
   5141 		case WM_T_PCH2:
   5142 		case WM_T_PCH_LPT:
   5143 		case WM_T_PCH_SPT:
   5144 		case WM_T_PCH_CNP:
   5145 			/* TARC0 */
   5146 			if (sc->sc_type == WM_T_ICH8) {
   5147 				/* Set TARC0 bits 29 and 28 */
   5148 				tarc0 |= __BITS(29, 28);
   5149 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5150 				tarc0 |= __BIT(29);
   5151 				/*
   5152 				 *  Drop bit 28. From Linux.
   5153 				 * See I218/I219 spec update
   5154 				 * "5. Buffer Overrun While the I219 is
   5155 				 * Processing DMA Transactions"
   5156 				 */
   5157 				tarc0 &= ~__BIT(28);
   5158 			}
   5159 			/* Set TARC0 bits 23,24,26,27 */
   5160 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5161 
   5162 			/* CTRL_EXT */
   5163 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5164 			reg |= __BIT(22);	/* Set bit 22 */
   5165 			/*
   5166 			 * Enable PHY low-power state when MAC is at D3
   5167 			 * w/o WoL
   5168 			 */
   5169 			if (sc->sc_type >= WM_T_PCH)
   5170 				reg |= CTRL_EXT_PHYPDEN;
   5171 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5172 
   5173 			/* TARC1 */
   5174 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5175 			/* bit 28 */
   5176 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5177 				tarc1 &= ~__BIT(28);
   5178 			else
   5179 				tarc1 |= __BIT(28);
   5180 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5181 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5182 
   5183 			/* Device Status */
   5184 			if (sc->sc_type == WM_T_ICH8) {
   5185 				reg = CSR_READ(sc, WMREG_STATUS);
   5186 				reg &= ~__BIT(31);
   5187 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5188 
   5189 			}
   5190 
   5191 			/* IOSFPC */
   5192 			if (sc->sc_type == WM_T_PCH_SPT) {
   5193 				reg = CSR_READ(sc, WMREG_IOSFPC);
   5194 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   5195 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5196 			}
   5197 			/*
    5198 			 * To work around a descriptor data corruption issue
    5199 			 * with NFS v2 UDP traffic, just disable the NFS
    5200 			 * filtering capability.
   5201 			 */
   5202 			reg = CSR_READ(sc, WMREG_RFCTL);
   5203 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5204 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5205 			break;
   5206 		default:
   5207 			break;
   5208 		}
   5209 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5210 
   5211 		switch (sc->sc_type) {
   5212 		case WM_T_82571:
   5213 		case WM_T_82572:
   5214 		case WM_T_82573:
   5215 		case WM_T_80003:
   5216 		case WM_T_ICH8:
   5217 			/*
   5218 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5219 			 * others to avoid RSS Hash Value bug.
   5220 			 */
   5221 			reg = CSR_READ(sc, WMREG_RFCTL);
   5222 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5223 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5224 			break;
   5225 		case WM_T_82574:
    5226 			/* Use extended Rx descriptors. */
   5227 			reg = CSR_READ(sc, WMREG_RFCTL);
   5228 			reg |= WMREG_RFCTL_EXSTEN;
   5229 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5230 			break;
   5231 		default:
   5232 			break;
   5233 		}
   5234 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5235 		/*
   5236 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5237 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5238 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5239 		 * Correctly by the Device"
   5240 		 *
   5241 		 * I354(C2000) Errata AVR53:
   5242 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5243 		 * Hang"
   5244 		 */
   5245 		reg = CSR_READ(sc, WMREG_RFCTL);
   5246 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5247 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5248 	}
   5249 }
   5250 
   5251 static uint32_t
   5252 wm_rxpbs_adjust_82580(uint32_t val)
   5253 {
   5254 	uint32_t rv = 0;
   5255 
   5256 	if (val < __arraycount(wm_82580_rxpbs_table))
   5257 		rv = wm_82580_rxpbs_table[val];
   5258 
   5259 	return rv;
   5260 }
   5261 
   5262 /*
   5263  * wm_reset_phy:
   5264  *
    5265  *	Generic PHY reset function.
   5266  *	Same as e1000_phy_hw_reset_generic()
   5267  */
   5268 static int
   5269 wm_reset_phy(struct wm_softc *sc)
   5270 {
   5271 	uint32_t reg;
   5272 	int rv;
   5273 
   5274 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5275 		device_xname(sc->sc_dev), __func__));
   5276 	if (wm_phy_resetisblocked(sc))
   5277 		return -1;
   5278 
   5279 	rv = sc->phy.acquire(sc);
   5280 	if (rv) {
   5281 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5282 		    __func__, rv);
   5283 		return rv;
   5284 	}
   5285 
   5286 	reg = CSR_READ(sc, WMREG_CTRL);
   5287 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5288 	CSR_WRITE_FLUSH(sc);
   5289 
   5290 	delay(sc->phy.reset_delay_us);
   5291 
   5292 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5293 	CSR_WRITE_FLUSH(sc);
   5294 
   5295 	delay(150);
   5296 
   5297 	sc->phy.release(sc);
   5298 
   5299 	wm_get_cfg_done(sc);
   5300 	wm_phy_post_reset(sc);
   5301 
   5302 	return 0;
   5303 }
   5304 
   5305 /*
   5306  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5307  *
   5308  * In i219, the descriptor rings must be emptied before resetting the HW
   5309  * or before changing the device state to D3 during runtime (runtime PM).
   5310  *
   5311  * Failure to do this will cause the HW to enter a unit hang state which can
   5312  * only be released by PCI reset on the device.
   5313  *
   5314  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5315  */
   5316 static void
   5317 wm_flush_desc_rings(struct wm_softc *sc)
   5318 {
   5319 	pcireg_t preg;
   5320 	uint32_t reg;
   5321 	struct wm_txqueue *txq;
   5322 	wiseman_txdesc_t *txd;
   5323 	int nexttx;
   5324 	uint32_t rctl;
   5325 
   5326 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5327 
   5328 	/* First, disable MULR fix in FEXTNVM11 */
   5329 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5330 	reg |= FEXTNVM11_DIS_MULRFIX;
   5331 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5332 
   5333 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5334 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5335 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5336 		return;
   5337 
   5338 	/*
   5339 	 * Remove all descriptors from the tx_ring.
   5340 	 *
   5341 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5342 	 * happens when the HW reads the regs. We assign the ring itself as
    5343 	 * the data of the next descriptor. We don't care about the data
    5344 	 * since we are about to reset the HW.
   5345 	 */
   5346 #ifdef WM_DEBUG
   5347 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5348 #endif
   5349 	reg = CSR_READ(sc, WMREG_TCTL);
   5350 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5351 
   5352 	txq = &sc->sc_queue[0].wmq_txq;
   5353 	nexttx = txq->txq_next;
   5354 	txd = &txq->txq_descs[nexttx];
   5355 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5356 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5357 	txd->wtx_fields.wtxu_status = 0;
   5358 	txd->wtx_fields.wtxu_options = 0;
   5359 	txd->wtx_fields.wtxu_vlan = 0;
   5360 
   5361 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5362 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5363 
   5364 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5365 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5366 	CSR_WRITE_FLUSH(sc);
   5367 	delay(250);
   5368 
   5369 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5370 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5371 		return;
   5372 
   5373 	/*
   5374 	 * Mark all descriptors in the RX ring as consumed and disable the
   5375 	 * rx ring.
   5376 	 */
   5377 #ifdef WM_DEBUG
   5378 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5379 #endif
   5380 	rctl = CSR_READ(sc, WMREG_RCTL);
   5381 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5382 	CSR_WRITE_FLUSH(sc);
   5383 	delay(150);
   5384 
   5385 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5386 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5387 	reg &= 0xffffc000;
   5388 	/*
   5389 	 * Update thresholds: prefetch threshold to 31, host threshold
   5390 	 * to 1 and make sure the granularity is "descriptors" and not
   5391 	 * "cache lines"
   5392 	 */
   5393 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
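         	/*
         	 * Layout assumed here (per the e1000 register definitions):
         	 * PTHRESH lives in bits 5:0 and HTHRESH in bits 13:8, so
         	 * 0x1f | (1 << 8) encodes PTHRESH=31, HTHRESH=1.
         	 */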
   5394 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5395 
   5396 	/* Momentarily enable the RX ring for the changes to take effect */
   5397 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5398 	CSR_WRITE_FLUSH(sc);
   5399 	delay(150);
   5400 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5401 }
   5402 
   5403 /*
   5404  * wm_reset:
   5405  *
   5406  *	Reset the i82542 chip.
   5407  */
   5408 static void
   5409 wm_reset(struct wm_softc *sc)
   5410 {
   5411 	int phy_reset = 0;
   5412 	int i, error = 0;
   5413 	uint32_t reg;
   5414 	uint16_t kmreg;
   5415 	int rv;
   5416 
   5417 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5418 		device_xname(sc->sc_dev), __func__));
   5419 	KASSERT(sc->sc_type != 0);
   5420 
   5421 	/*
   5422 	 * Allocate on-chip memory according to the MTU size.
   5423 	 * The Packet Buffer Allocation register must be written
   5424 	 * before the chip is reset.
   5425 	 */
   5426 	switch (sc->sc_type) {
   5427 	case WM_T_82547:
   5428 	case WM_T_82547_2:
   5429 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5430 		    PBA_22K : PBA_30K;
   5431 		for (i = 0; i < sc->sc_nqueues; i++) {
   5432 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5433 			txq->txq_fifo_head = 0;
   5434 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5435 			txq->txq_fifo_size =
   5436 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5437 			txq->txq_fifo_stall = 0;
   5438 		}
   5439 		break;
   5440 	case WM_T_82571:
   5441 	case WM_T_82572:
    5442 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5443 	case WM_T_80003:
   5444 		sc->sc_pba = PBA_32K;
   5445 		break;
   5446 	case WM_T_82573:
   5447 		sc->sc_pba = PBA_12K;
   5448 		break;
   5449 	case WM_T_82574:
   5450 	case WM_T_82583:
   5451 		sc->sc_pba = PBA_20K;
   5452 		break;
   5453 	case WM_T_82576:
   5454 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5455 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5456 		break;
   5457 	case WM_T_82580:
   5458 	case WM_T_I350:
   5459 	case WM_T_I354:
   5460 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5461 		break;
   5462 	case WM_T_I210:
   5463 	case WM_T_I211:
   5464 		sc->sc_pba = PBA_34K;
   5465 		break;
   5466 	case WM_T_ICH8:
   5467 		/* Workaround for a bit corruption issue in FIFO memory */
   5468 		sc->sc_pba = PBA_8K;
   5469 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5470 		break;
   5471 	case WM_T_ICH9:
   5472 	case WM_T_ICH10:
   5473 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5474 		    PBA_14K : PBA_10K;
   5475 		break;
   5476 	case WM_T_PCH:
   5477 	case WM_T_PCH2:	/* XXX 14K? */
   5478 	case WM_T_PCH_LPT:
   5479 	case WM_T_PCH_SPT:
   5480 	case WM_T_PCH_CNP:
   5481 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5482 		    PBA_12K : PBA_26K;
   5483 		break;
   5484 	default:
   5485 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5486 		    PBA_40K : PBA_48K;
   5487 		break;
   5488 	}
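         	/*
         	 * The PBA value is the Rx share of the on-chip packet buffer
         	 * in KB; e.g. in the 82547 case above, PBA_30K leaves
         	 * 40K - 30K = 10K for the Tx FIFO.
         	 */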
   5489 	/*
   5490 	 * Only old or non-multiqueue devices have the PBA register
   5491 	 * XXX Need special handling for 82575.
   5492 	 */
   5493 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5494 	    || (sc->sc_type == WM_T_82575))
   5495 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5496 
   5497 	/* Prevent the PCI-E bus from sticking */
   5498 	if (sc->sc_flags & WM_F_PCIE) {
   5499 		int timeout = 800;
   5500 
   5501 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5502 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5503 
   5504 		while (timeout--) {
   5505 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5506 			    == 0)
   5507 				break;
   5508 			delay(100);
   5509 		}
   5510 		if (timeout == 0)
   5511 			device_printf(sc->sc_dev,
   5512 			    "failed to disable bus mastering\n");
   5513 	}
   5514 
   5515 	/* Set the completion timeout for interface */
   5516 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5517 	    || (sc->sc_type == WM_T_82580)
   5518 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5519 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5520 		wm_set_pcie_completion_timeout(sc);
   5521 
   5522 	/* Clear interrupt */
   5523 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5524 	if (wm_is_using_msix(sc)) {
   5525 		if (sc->sc_type != WM_T_82574) {
   5526 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5527 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5528 		} else
   5529 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5530 	}
   5531 
   5532 	/* Stop the transmit and receive processes. */
   5533 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5534 	sc->sc_rctl &= ~RCTL_EN;
   5535 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5536 	CSR_WRITE_FLUSH(sc);
   5537 
   5538 	/* XXX set_tbi_sbp_82543() */
   5539 
   5540 	delay(10*1000);
   5541 
   5542 	/* Must acquire the MDIO ownership before MAC reset */
   5543 	switch (sc->sc_type) {
   5544 	case WM_T_82573:
   5545 	case WM_T_82574:
   5546 	case WM_T_82583:
   5547 		error = wm_get_hw_semaphore_82573(sc);
   5548 		break;
   5549 	default:
   5550 		break;
   5551 	}
   5552 
   5553 	/*
   5554 	 * 82541 Errata 29? & 82547 Errata 28?
   5555 	 * See also the description about PHY_RST bit in CTRL register
   5556 	 * in 8254x_GBe_SDM.pdf.
   5557 	 */
   5558 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5559 		CSR_WRITE(sc, WMREG_CTRL,
   5560 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5561 		CSR_WRITE_FLUSH(sc);
   5562 		delay(5000);
   5563 	}
   5564 
   5565 	switch (sc->sc_type) {
   5566 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5567 	case WM_T_82541:
   5568 	case WM_T_82541_2:
   5569 	case WM_T_82547:
   5570 	case WM_T_82547_2:
   5571 		/*
   5572 		 * On some chipsets, a reset through a memory-mapped write
   5573 		 * cycle can cause the chip to reset before completing the
    5574 		 * write cycle. This causes major headaches that can be avoided
   5575 		 * by issuing the reset via indirect register writes through
   5576 		 * I/O space.
   5577 		 *
   5578 		 * So, if we successfully mapped the I/O BAR at attach time,
   5579 		 * use that. Otherwise, try our luck with a memory-mapped
   5580 		 * reset.
   5581 		 */
   5582 		if (sc->sc_flags & WM_F_IOH_VALID)
   5583 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5584 		else
   5585 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5586 		break;
   5587 	case WM_T_82545_3:
   5588 	case WM_T_82546_3:
   5589 		/* Use the shadow control register on these chips. */
   5590 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5591 		break;
   5592 	case WM_T_80003:
   5593 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5594 		if (sc->phy.acquire(sc) != 0)
   5595 			break;
   5596 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5597 		sc->phy.release(sc);
   5598 		break;
   5599 	case WM_T_ICH8:
   5600 	case WM_T_ICH9:
   5601 	case WM_T_ICH10:
   5602 	case WM_T_PCH:
   5603 	case WM_T_PCH2:
   5604 	case WM_T_PCH_LPT:
   5605 	case WM_T_PCH_SPT:
   5606 	case WM_T_PCH_CNP:
   5607 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5608 		if (wm_phy_resetisblocked(sc) == false) {
   5609 			/*
   5610 			 * Gate automatic PHY configuration by hardware on
   5611 			 * non-managed 82579
   5612 			 */
   5613 			if ((sc->sc_type == WM_T_PCH2)
   5614 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5615 				== 0))
   5616 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5617 
   5618 			reg |= CTRL_PHY_RESET;
   5619 			phy_reset = 1;
   5620 		} else
   5621 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5622 		if (sc->phy.acquire(sc) != 0)
   5623 			break;
   5624 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5625 		/* Don't insert a completion barrier during reset */
   5626 		delay(20*1000);
   5627 		/*
   5628 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5629 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5630 		 * only. See also wm_get_swflag_ich8lan().
   5631 		 */
   5632 		mutex_exit(sc->sc_ich_phymtx);
   5633 		break;
   5634 	case WM_T_82580:
   5635 	case WM_T_I350:
   5636 	case WM_T_I354:
   5637 	case WM_T_I210:
   5638 	case WM_T_I211:
   5639 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5640 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5641 			CSR_WRITE_FLUSH(sc);
   5642 		delay(5000);
   5643 		break;
   5644 	case WM_T_82542_2_0:
   5645 	case WM_T_82542_2_1:
   5646 	case WM_T_82543:
   5647 	case WM_T_82540:
   5648 	case WM_T_82545:
   5649 	case WM_T_82546:
   5650 	case WM_T_82571:
   5651 	case WM_T_82572:
   5652 	case WM_T_82573:
   5653 	case WM_T_82574:
   5654 	case WM_T_82575:
   5655 	case WM_T_82576:
   5656 	case WM_T_82583:
   5657 	default:
   5658 		/* Everything else can safely use the documented method. */
   5659 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5660 		break;
   5661 	}
   5662 
   5663 	/* Must release the MDIO ownership after MAC reset */
   5664 	switch (sc->sc_type) {
   5665 	case WM_T_82573:
   5666 	case WM_T_82574:
   5667 	case WM_T_82583:
   5668 		if (error == 0)
   5669 			wm_put_hw_semaphore_82573(sc);
   5670 		break;
   5671 	default:
   5672 		break;
   5673 	}
   5674 
   5675 	/* Set Phy Config Counter to 50msec */
   5676 	if (sc->sc_type == WM_T_PCH2) {
   5677 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5678 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5679 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5680 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5681 	}
   5682 
   5683 	if (phy_reset != 0)
   5684 		wm_get_cfg_done(sc);
   5685 
   5686 	/* Reload EEPROM */
   5687 	switch (sc->sc_type) {
   5688 	case WM_T_82542_2_0:
   5689 	case WM_T_82542_2_1:
   5690 	case WM_T_82543:
   5691 	case WM_T_82544:
   5692 		delay(10);
   5693 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5694 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5695 		CSR_WRITE_FLUSH(sc);
   5696 		delay(2000);
   5697 		break;
   5698 	case WM_T_82540:
   5699 	case WM_T_82545:
   5700 	case WM_T_82545_3:
   5701 	case WM_T_82546:
   5702 	case WM_T_82546_3:
   5703 		delay(5*1000);
   5704 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5705 		break;
   5706 	case WM_T_82541:
   5707 	case WM_T_82541_2:
   5708 	case WM_T_82547:
   5709 	case WM_T_82547_2:
   5710 		delay(20000);
   5711 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5712 		break;
   5713 	case WM_T_82571:
   5714 	case WM_T_82572:
   5715 	case WM_T_82573:
   5716 	case WM_T_82574:
   5717 	case WM_T_82583:
   5718 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5719 			delay(10);
   5720 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5721 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5722 			CSR_WRITE_FLUSH(sc);
   5723 		}
   5724 		/* check EECD_EE_AUTORD */
   5725 		wm_get_auto_rd_done(sc);
   5726 		/*
   5727 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5728 		 * is set.
   5729 		 */
   5730 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5731 		    || (sc->sc_type == WM_T_82583))
   5732 			delay(25*1000);
   5733 		break;
   5734 	case WM_T_82575:
   5735 	case WM_T_82576:
   5736 	case WM_T_82580:
   5737 	case WM_T_I350:
   5738 	case WM_T_I354:
   5739 	case WM_T_I210:
   5740 	case WM_T_I211:
   5741 	case WM_T_80003:
   5742 		/* check EECD_EE_AUTORD */
   5743 		wm_get_auto_rd_done(sc);
   5744 		break;
   5745 	case WM_T_ICH8:
   5746 	case WM_T_ICH9:
   5747 	case WM_T_ICH10:
   5748 	case WM_T_PCH:
   5749 	case WM_T_PCH2:
   5750 	case WM_T_PCH_LPT:
   5751 	case WM_T_PCH_SPT:
   5752 	case WM_T_PCH_CNP:
   5753 		break;
   5754 	default:
   5755 		panic("%s: unknown type\n", __func__);
   5756 	}
   5757 
   5758 	/* Check whether EEPROM is present or not */
   5759 	switch (sc->sc_type) {
   5760 	case WM_T_82575:
   5761 	case WM_T_82576:
   5762 	case WM_T_82580:
   5763 	case WM_T_I350:
   5764 	case WM_T_I354:
   5765 	case WM_T_ICH8:
   5766 	case WM_T_ICH9:
   5767 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5768 			/* Not found */
   5769 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5770 			if (sc->sc_type == WM_T_82575)
   5771 				wm_reset_init_script_82575(sc);
   5772 		}
   5773 		break;
   5774 	default:
   5775 		break;
   5776 	}
   5777 
   5778 	if (phy_reset != 0)
   5779 		wm_phy_post_reset(sc);
   5780 
   5781 	if ((sc->sc_type == WM_T_82580)
   5782 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5783 		/* Clear global device reset status bit */
   5784 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5785 	}
   5786 
   5787 	/* Clear any pending interrupt events. */
   5788 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5789 	reg = CSR_READ(sc, WMREG_ICR);
   5790 	if (wm_is_using_msix(sc)) {
   5791 		if (sc->sc_type != WM_T_82574) {
   5792 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5793 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5794 		} else
   5795 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5796 	}
   5797 
   5798 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5799 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5800 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5801 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5802 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5803 		reg |= KABGTXD_BGSQLBIAS;
   5804 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5805 	}
   5806 
   5807 	/* Reload sc_ctrl */
   5808 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5809 
   5810 	wm_set_eee(sc);
   5811 
   5812 	/*
   5813 	 * For PCH, this write will make sure that any noise will be detected
   5814 	 * as a CRC error and be dropped rather than show up as a bad packet
   5815 	 * to the DMA engine
   5816 	 */
   5817 	if (sc->sc_type == WM_T_PCH)
   5818 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5819 
   5820 	if (sc->sc_type >= WM_T_82544)
   5821 		CSR_WRITE(sc, WMREG_WUC, 0);
   5822 
   5823 	if (sc->sc_type < WM_T_82575)
   5824 		wm_disable_aspm(sc); /* Workaround for some chips */
   5825 
   5826 	wm_reset_mdicnfg_82580(sc);
   5827 
   5828 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5829 		wm_pll_workaround_i210(sc);
   5830 
   5831 	if (sc->sc_type == WM_T_80003) {
   5832 		/* Default to TRUE to enable the MDIC W/A */
   5833 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5834 
   5835 		rv = wm_kmrn_readreg(sc,
   5836 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5837 		if (rv == 0) {
   5838 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5839 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5840 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5841 			else
   5842 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5843 		}
   5844 	}
   5845 }
   5846 
   5847 /*
   5848  * wm_add_rxbuf:
   5849  *
    5850  *	Add a receive buffer to the indicated descriptor.
   5851  */
   5852 static int
   5853 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5854 {
   5855 	struct wm_softc *sc = rxq->rxq_sc;
   5856 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5857 	struct mbuf *m;
   5858 	int error;
   5859 
   5860 	KASSERT(mutex_owned(rxq->rxq_lock));
   5861 
   5862 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5863 	if (m == NULL)
   5864 		return ENOBUFS;
   5865 
   5866 	MCLGET(m, M_DONTWAIT);
   5867 	if ((m->m_flags & M_EXT) == 0) {
   5868 		m_freem(m);
   5869 		return ENOBUFS;
   5870 	}
   5871 
   5872 	if (rxs->rxs_mbuf != NULL)
   5873 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5874 
   5875 	rxs->rxs_mbuf = m;
   5876 
   5877 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5878 	/*
   5879 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5880 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5881 	 */
   5882 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5883 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5884 	if (error) {
   5885 		/* XXX XXX XXX */
   5886 		aprint_error_dev(sc->sc_dev,
   5887 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5888 		panic("wm_add_rxbuf");
   5889 	}
   5890 
   5891 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5892 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5893 
   5894 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5895 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5896 			wm_init_rxdesc(rxq, idx);
   5897 	} else
   5898 		wm_init_rxdesc(rxq, idx);
   5899 
   5900 	return 0;
   5901 }
   5902 
   5903 /*
   5904  * wm_rxdrain:
   5905  *
   5906  *	Drain the receive queue.
   5907  */
   5908 static void
   5909 wm_rxdrain(struct wm_rxqueue *rxq)
   5910 {
   5911 	struct wm_softc *sc = rxq->rxq_sc;
   5912 	struct wm_rxsoft *rxs;
   5913 	int i;
   5914 
   5915 	KASSERT(mutex_owned(rxq->rxq_lock));
   5916 
   5917 	for (i = 0; i < WM_NRXDESC; i++) {
   5918 		rxs = &rxq->rxq_soft[i];
   5919 		if (rxs->rxs_mbuf != NULL) {
   5920 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5921 			m_freem(rxs->rxs_mbuf);
   5922 			rxs->rxs_mbuf = NULL;
   5923 		}
   5924 	}
   5925 }
   5926 
   5927 /*
   5928  * Setup registers for RSS.
   5929  *
    5930  * XXX VMDq not yet supported
   5931  */
   5932 static void
   5933 wm_init_rss(struct wm_softc *sc)
   5934 {
   5935 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5936 	int i;
   5937 
   5938 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5939 
   5940 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5941 		unsigned int qid, reta_ent;
   5942 
   5943 		qid  = i % sc->sc_nqueues;
   5944 		switch (sc->sc_type) {
   5945 		case WM_T_82574:
   5946 			reta_ent = __SHIFTIN(qid,
   5947 			    RETA_ENT_QINDEX_MASK_82574);
   5948 			break;
   5949 		case WM_T_82575:
   5950 			reta_ent = __SHIFTIN(qid,
   5951 			    RETA_ENT_QINDEX1_MASK_82575);
   5952 			break;
   5953 		default:
   5954 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5955 			break;
   5956 		}
   5957 
   5958 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5959 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5960 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5961 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5962 	}
   5963 
   5964 	rss_getkey((uint8_t *)rss_key);
   5965 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5966 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5967 
   5968 	if (sc->sc_type == WM_T_82574)
   5969 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5970 	else
   5971 		mrqc = MRQC_ENABLE_RSS_MQ;
   5972 
   5973 	/*
   5974 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5975 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5976 	 */
   5977 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5978 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5979 #if 0
   5980 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5981 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5982 #endif
   5983 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5984 
   5985 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5986 }
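
/*
 * Illustrative example: with sc_nqueues = 4, the redirection table
 * entries above cycle through queue IDs 0,1,2,3,0,1,... so the low
 * bits of a received packet's RSS hash select the RX queue, spreading
 * flows roughly evenly across the four queues.
 */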
   5987 
   5988 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
 *     - The number of MSI-X vectors (= "nvectors" argument)
 *     - ncpu
   5995  */
   5996 static void
   5997 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5998 {
   5999 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6000 
   6001 	if (nvectors < 2) {
   6002 		sc->sc_nqueues = 1;
   6003 		return;
   6004 	}
   6005 
   6006 	switch (sc->sc_type) {
   6007 	case WM_T_82572:
   6008 		hw_ntxqueues = 2;
   6009 		hw_nrxqueues = 2;
   6010 		break;
   6011 	case WM_T_82574:
   6012 		hw_ntxqueues = 2;
   6013 		hw_nrxqueues = 2;
   6014 		break;
   6015 	case WM_T_82575:
   6016 		hw_ntxqueues = 4;
   6017 		hw_nrxqueues = 4;
   6018 		break;
   6019 	case WM_T_82576:
   6020 		hw_ntxqueues = 16;
   6021 		hw_nrxqueues = 16;
   6022 		break;
   6023 	case WM_T_82580:
   6024 	case WM_T_I350:
   6025 	case WM_T_I354:
   6026 		hw_ntxqueues = 8;
   6027 		hw_nrxqueues = 8;
   6028 		break;
   6029 	case WM_T_I210:
   6030 		hw_ntxqueues = 4;
   6031 		hw_nrxqueues = 4;
   6032 		break;
   6033 	case WM_T_I211:
   6034 		hw_ntxqueues = 2;
   6035 		hw_nrxqueues = 2;
   6036 		break;
   6037 		/*
		 * The Ethernet controllers below do not support MSI-X,
		 * so this driver doesn't let them use multiqueue:
   6040 		 *     - WM_T_80003
   6041 		 *     - WM_T_ICH8
   6042 		 *     - WM_T_ICH9
   6043 		 *     - WM_T_ICH10
   6044 		 *     - WM_T_PCH
   6045 		 *     - WM_T_PCH2
   6046 		 *     - WM_T_PCH_LPT
   6047 		 */
   6048 	default:
   6049 		hw_ntxqueues = 1;
   6050 		hw_nrxqueues = 1;
   6051 		break;
   6052 	}
   6053 
   6054 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6055 
   6056 	/*
	 * Since more queues than MSI-X vectors cannot improve scaling, limit
	 * the number of queues actually used.
   6059 	 */
   6060 	if (nvectors < hw_nqueues + 1)
   6061 		sc->sc_nqueues = nvectors - 1;
   6062 	else
   6063 		sc->sc_nqueues = hw_nqueues;
   6064 
   6065 	/*
	 * Since more queues than CPUs cannot improve scaling, limit
	 * the number of queues actually used.
   6068 	 */
   6069 	if (ncpu < sc->sc_nqueues)
   6070 		sc->sc_nqueues = ncpu;
   6071 }
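
/*
 * Worked example (for illustration): an 82576 exposes 16 hardware
 * queue pairs. Probed with nvectors = 5 on a 4-CPU system,
 * hw_nqueues = 16 and nvectors < hw_nqueues + 1, so
 * sc_nqueues = nvectors - 1 = 4; the ncpu clamp leaves it at 4, and
 * the remaining vector serves the link interrupt (see
 * wm_setup_msix()).
 */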
   6072 
   6073 static inline bool
   6074 wm_is_using_msix(struct wm_softc *sc)
   6075 {
   6076 
   6077 	return (sc->sc_nintrs > 1);
   6078 }
   6079 
   6080 static inline bool
   6081 wm_is_using_multiqueue(struct wm_softc *sc)
   6082 {
   6083 
   6084 	return (sc->sc_nqueues > 1);
   6085 }
   6086 
   6087 static int
   6088 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6089 {
   6090 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6091 
   6092 	wmq->wmq_id = qidx;
   6093 	wmq->wmq_intr_idx = intr_idx;
   6094 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6095 	    wm_handle_queue, wmq);
   6096 	if (wmq->wmq_si != NULL)
   6097 		return 0;
   6098 
   6099 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6100 	    wmq->wmq_id);
   6101 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6102 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6103 	return ENOMEM;
   6104 }
   6105 
   6106 /*
 * Both single-interrupt MSI and INTx can use this function.
   6108  */
   6109 static int
   6110 wm_setup_legacy(struct wm_softc *sc)
   6111 {
   6112 	pci_chipset_tag_t pc = sc->sc_pc;
   6113 	const char *intrstr = NULL;
   6114 	char intrbuf[PCI_INTRSTR_LEN];
   6115 	int error;
   6116 
   6117 	error = wm_alloc_txrx_queues(sc);
   6118 	if (error) {
   6119 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6120 		    error);
   6121 		return ENOMEM;
   6122 	}
   6123 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6124 	    sizeof(intrbuf));
   6125 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6126 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6127 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6128 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   6130 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6131 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6132 		return ENOMEM;
   6133 	}
   6134 
   6135 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6136 	sc->sc_nintrs = 1;
   6137 
   6138 	return wm_softint_establish_queue(sc, 0, 0);
   6139 }
   6140 
   6141 static int
   6142 wm_setup_msix(struct wm_softc *sc)
   6143 {
   6144 	void *vih;
   6145 	kcpuset_t *affinity;
   6146 	int qidx, error, intr_idx, txrx_established;
   6147 	pci_chipset_tag_t pc = sc->sc_pc;
   6148 	const char *intrstr = NULL;
   6149 	char intrbuf[PCI_INTRSTR_LEN];
   6150 	char intr_xname[INTRDEVNAMEBUF];
   6151 
   6152 	if (sc->sc_nqueues < ncpu) {
   6153 		/*
		 * To avoid other devices' interrupts, the affinity of the
		 * TX/RX interrupts starts at CPU#1.
   6156 		 */
   6157 		sc->sc_affinity_offset = 1;
   6158 	} else {
   6159 		/*
		 * In this case, this device uses all CPUs, so we align the
		 * affinitized cpu_index with the MSI-X vector number for
		 * readability.
   6162 		 */
   6163 		sc->sc_affinity_offset = 0;
   6164 	}
   6165 
   6166 	error = wm_alloc_txrx_queues(sc);
   6167 	if (error) {
   6168 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6169 		    error);
   6170 		return ENOMEM;
   6171 	}
   6172 
   6173 	kcpuset_create(&affinity, false);
   6174 	intr_idx = 0;
   6175 
   6176 	/*
   6177 	 * TX and RX
   6178 	 */
   6179 	txrx_established = 0;
   6180 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6181 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6182 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6183 
   6184 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6185 		    sizeof(intrbuf));
   6186 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6187 		    PCI_INTR_MPSAFE, true);
   6188 		memset(intr_xname, 0, sizeof(intr_xname));
   6189 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6190 		    device_xname(sc->sc_dev), qidx);
   6191 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6192 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6193 		if (vih == NULL) {
   6194 			aprint_error_dev(sc->sc_dev,
   6195 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6196 			    intrstr ? " at " : "",
   6197 			    intrstr ? intrstr : "");
   6198 
   6199 			goto fail;
   6200 		}
   6201 		kcpuset_zero(affinity);
   6202 		/* Round-robin affinity */
   6203 		kcpuset_set(affinity, affinity_to);
   6204 		error = interrupt_distribute(vih, affinity, NULL);
   6205 		if (error == 0) {
   6206 			aprint_normal_dev(sc->sc_dev,
   6207 			    "for TX and RX interrupting at %s affinity to %u\n",
   6208 			    intrstr, affinity_to);
   6209 		} else {
   6210 			aprint_normal_dev(sc->sc_dev,
   6211 			    "for TX and RX interrupting at %s\n", intrstr);
   6212 		}
   6213 		sc->sc_ihs[intr_idx] = vih;
   6214 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6215 			goto fail;
   6216 		txrx_established++;
   6217 		intr_idx++;
   6218 	}
   6219 
   6220 	/* LINK */
   6221 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6222 	    sizeof(intrbuf));
   6223 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6224 	memset(intr_xname, 0, sizeof(intr_xname));
   6225 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6226 	    device_xname(sc->sc_dev));
   6227 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6228 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6229 	if (vih == NULL) {
   6230 		aprint_error_dev(sc->sc_dev,
   6231 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6232 		    intrstr ? " at " : "",
   6233 		    intrstr ? intrstr : "");
   6234 
   6235 		goto fail;
   6236 	}
   6237 	/* Keep default affinity to LINK interrupt */
   6238 	aprint_normal_dev(sc->sc_dev,
   6239 	    "for LINK interrupting at %s\n", intrstr);
   6240 	sc->sc_ihs[intr_idx] = vih;
   6241 	sc->sc_link_intr_idx = intr_idx;
   6242 
   6243 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6244 	kcpuset_destroy(affinity);
   6245 	return 0;
   6246 
   6247 fail:
   6248 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6249 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6250 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6251 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6252 	}
   6253 
   6254 	kcpuset_destroy(affinity);
   6255 	return ENOMEM;
   6256 }
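
/*
 * Resulting vector layout (illustrative): with sc_nqueues = 4, MSI-X
 * vectors 0..3 become the per-queue TXRX handlers and vector 4 the
 * LINK handler, so sc_nintrs = 5. With sc_affinity_offset = 1, the
 * TXRX vectors are bound round-robin starting at CPU#1, leaving CPU#0
 * to other devices' interrupts.
 */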
   6257 
   6258 static void
   6259 wm_unset_stopping_flags(struct wm_softc *sc)
   6260 {
   6261 	int i;
   6262 
   6263 	KASSERT(mutex_owned(sc->sc_core_lock));
   6264 
   6265 	/* Must unset stopping flags in ascending order. */
   6266 	for (i = 0; i < sc->sc_nqueues; i++) {
   6267 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6268 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6269 
   6270 		mutex_enter(txq->txq_lock);
   6271 		txq->txq_stopping = false;
   6272 		mutex_exit(txq->txq_lock);
   6273 
   6274 		mutex_enter(rxq->rxq_lock);
   6275 		rxq->rxq_stopping = false;
   6276 		mutex_exit(rxq->rxq_lock);
   6277 	}
   6278 
   6279 	sc->sc_core_stopping = false;
   6280 }
   6281 
   6282 static void
   6283 wm_set_stopping_flags(struct wm_softc *sc)
   6284 {
   6285 	int i;
   6286 
   6287 	KASSERT(mutex_owned(sc->sc_core_lock));
   6288 
   6289 	sc->sc_core_stopping = true;
   6290 
   6291 	/* Must set stopping flags in ascending order. */
   6292 	for (i = 0; i < sc->sc_nqueues; i++) {
   6293 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6294 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6295 
   6296 		mutex_enter(rxq->rxq_lock);
   6297 		rxq->rxq_stopping = true;
   6298 		mutex_exit(rxq->rxq_lock);
   6299 
   6300 		mutex_enter(txq->txq_lock);
   6301 		txq->txq_stopping = true;
   6302 		mutex_exit(txq->txq_lock);
   6303 	}
   6304 }
   6305 
   6306 /*
 * Write the interrupt interval value to the ITR or EITR register.
   6308  */
   6309 static void
   6310 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6311 {
   6312 
   6313 	if (!wmq->wmq_set_itr)
   6314 		return;
   6315 
   6316 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6317 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6318 
   6319 		/*
		 * The 82575 doesn't have the CNT_INGR field, so overwrite
		 * the counter field in software.
   6322 		 */
   6323 		if (sc->sc_type == WM_T_82575)
   6324 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6325 			    EITR_COUNTER_MASK_82575);
   6326 		else
   6327 			eitr |= EITR_CNT_INGR;
   6328 
   6329 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6330 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6331 		/*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
   6334 		 */
   6335 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6336 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6337 	} else {
   6338 		KASSERT(wmq->wmq_id == 0);
   6339 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6340 	}
   6341 
   6342 	wmq->wmq_set_itr = false;
   6343 }
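
/*
 * For example (illustrative), writing wmq_itr = 450 on a NEWQUEUE
 * controller other than the 82575 places 450 in the EITR interval
 * field with EITR_CNT_INGR set, so the write leaves the internal
 * interrupt counter untouched; the 82575 lacks that bit, hence the
 * software overwrite of the counter field above.
 */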
   6344 
   6345 /*
   6346  * TODO
 * The dynamic ITR calculation below is almost the same as Linux's igb,
 * but it does not fit wm(4) well, so AIM stays disabled until we find
 * an appropriate ITR calculation.
   6350  */
   6351 /*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register. This function does not write the ITR/EITR
 * register itself.
   6354  */
   6355 static void
   6356 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6357 {
   6358 #ifdef NOTYET
   6359 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6360 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6361 	uint32_t avg_size = 0;
   6362 	uint32_t new_itr;
   6363 
   6364 	if (rxq->rxq_packets)
   6365 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6366 	if (txq->txq_packets)
   6367 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6368 
   6369 	if (avg_size == 0) {
   6370 		new_itr = 450; /* restore default value */
   6371 		goto out;
   6372 	}
   6373 
   6374 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6375 	avg_size += 24;
   6376 
   6377 	/* Don't starve jumbo frames */
   6378 	avg_size = uimin(avg_size, 3000);
   6379 
   6380 	/* Give a little boost to mid-size frames */
   6381 	if ((avg_size > 300) && (avg_size < 1200))
   6382 		new_itr = avg_size / 3;
   6383 	else
   6384 		new_itr = avg_size / 2;
   6385 
   6386 out:
   6387 	/*
	 * The usage of the 82574's and 82575's EITR differs from that of
	 * other NEWQUEUE controllers. See the sc->sc_itr_init setting in
	 * wm_init_locked().
   6390 	 */
   6391 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6392 		new_itr *= 4;
   6393 
   6394 	if (new_itr != wmq->wmq_itr) {
   6395 		wmq->wmq_itr = new_itr;
   6396 		wmq->wmq_set_itr = true;
   6397 	} else
   6398 		wmq->wmq_set_itr = false;
   6399 
   6400 	rxq->rxq_packets = 0;
   6401 	rxq->rxq_bytes = 0;
   6402 	txq->txq_packets = 0;
   6403 	txq->txq_bytes = 0;
   6404 #endif
   6405 }
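
/*
 * Worked example for the disabled AIM path above (illustrative): with
 * an average frame of 600 bytes, avg_size = 600 + 24 = 624, which is
 * mid-size (300 < 624 < 1200), so new_itr = 624 / 3 = 208; on
 * controllers other than the 82575 this is then scaled by 4 to 832.
 */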
   6406 
   6407 static void
   6408 wm_init_sysctls(struct wm_softc *sc)
   6409 {
   6410 	struct sysctllog **log;
   6411 	const struct sysctlnode *rnode, *qnode, *cnode;
   6412 	int i, rv;
   6413 	const char *dvname;
   6414 
   6415 	log = &sc->sc_sysctllog;
   6416 	dvname = device_xname(sc->sc_dev);
   6417 
   6418 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6419 	    0, CTLTYPE_NODE, dvname,
   6420 	    SYSCTL_DESCR("wm information and settings"),
   6421 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6422 	if (rv != 0)
   6423 		goto err;
   6424 
   6425 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6426 	    CTLTYPE_BOOL, "txrx_workqueue",
   6427 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6428 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6429 	if (rv != 0)
   6430 		goto teardown;
   6431 
   6432 	for (i = 0; i < sc->sc_nqueues; i++) {
   6433 		struct wm_queue *wmq = &sc->sc_queue[i];
   6434 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6435 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6436 
   6437 		snprintf(sc->sc_queue[i].sysctlname,
   6438 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6439 
   6440 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6441 		    0, CTLTYPE_NODE,
   6442 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6443 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6444 			break;
   6445 
   6446 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6447 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6448 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6449 		    NULL, 0, &txq->txq_free,
   6450 		    0, CTL_CREATE, CTL_EOL) != 0)
   6451 			break;
   6452 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6453 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6454 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6455 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6456 		    0, CTL_CREATE, CTL_EOL) != 0)
   6457 			break;
   6458 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6459 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6460 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6461 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6462 		    0, CTL_CREATE, CTL_EOL) != 0)
   6463 			break;
   6464 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6465 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6466 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6467 		    NULL, 0, &txq->txq_next,
   6468 		    0, CTL_CREATE, CTL_EOL) != 0)
   6469 			break;
   6470 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6471 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6472 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6473 		    NULL, 0, &txq->txq_sfree,
   6474 		    0, CTL_CREATE, CTL_EOL) != 0)
   6475 			break;
   6476 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6477 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6478 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6479 		    NULL, 0, &txq->txq_snext,
   6480 		    0, CTL_CREATE, CTL_EOL) != 0)
   6481 			break;
   6482 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6483 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6484 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6485 		    NULL, 0, &txq->txq_sdirty,
   6486 		    0, CTL_CREATE, CTL_EOL) != 0)
   6487 			break;
   6488 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6489 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6490 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6491 		    NULL, 0, &txq->txq_flags,
   6492 		    0, CTL_CREATE, CTL_EOL) != 0)
   6493 			break;
   6494 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6495 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6496 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6497 		    NULL, 0, &txq->txq_stopping,
   6498 		    0, CTL_CREATE, CTL_EOL) != 0)
   6499 			break;
   6500 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6501 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6502 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6503 		    NULL, 0, &txq->txq_sending,
   6504 		    0, CTL_CREATE, CTL_EOL) != 0)
   6505 			break;
   6506 
   6507 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6508 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6509 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6510 		    NULL, 0, &rxq->rxq_ptr,
   6511 		    0, CTL_CREATE, CTL_EOL) != 0)
   6512 			break;
   6513 	}
   6514 
   6515 #ifdef WM_DEBUG
   6516 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6517 	    CTLTYPE_INT, "debug_flags",
   6518 	    SYSCTL_DESCR(
   6519 		    "Debug flags:\n"	\
   6520 		    "\t0x01 LINK\n"	\
   6521 		    "\t0x02 TX\n"	\
   6522 		    "\t0x04 RX\n"	\
   6523 		    "\t0x08 GMII\n"	\
   6524 		    "\t0x10 MANAGE\n"	\
   6525 		    "\t0x20 NVM\n"	\
   6526 		    "\t0x40 INIT\n"	\
   6527 		    "\t0x80 LOCK"),
   6528 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6529 	if (rv != 0)
   6530 		goto teardown;
   6531 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6532 	    CTLTYPE_BOOL, "trigger_reset",
   6533 	    SYSCTL_DESCR("Trigger an interface reset"),
   6534 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6535 	if (rv != 0)
   6536 		goto teardown;
   6537 #endif
   6538 
   6539 	return;
   6540 
   6541 teardown:
   6542 	sysctl_teardown(log);
   6543 err:
   6544 	sc->sc_sysctllog = NULL;
   6545 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6546 	    __func__, rv);
   6547 }
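
/*
 * The nodes above land under hw.<devname>; for example, assuming the
 * first instance attaches as wm0, "sysctl -w hw.wm0.txrx_workqueue=1"
 * switches packet processing to the workqueue and
 * "sysctl hw.wm0.q0.txq_free" shows queue 0's free TX descriptor
 * count.
 */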
   6548 
   6549 static void
   6550 wm_update_stats(struct wm_softc *sc)
   6551 {
   6552 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
	    cexterr;
   6555 
   6556 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6557 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6558 	mpc = CSR_READ(sc, WMREG_MPC);
   6559 	colc = CSR_READ(sc, WMREG_COLC);
   6560 	sec = CSR_READ(sc, WMREG_SEC);
   6561 	rlec = CSR_READ(sc, WMREG_RLEC);
   6562 
   6563 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6564 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6565 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6566 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6567 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6568 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6569 
   6570 	if (sc->sc_type >= WM_T_82543) {
   6571 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6572 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6573 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6574 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6575 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6576 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6577 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6578 		} else {
   6579 			cexterr = 0;
   6580 			/* Excessive collision + Link down */
   6581 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6582 			    CSR_READ(sc, WMREG_HTDPMC));
   6583 		}
   6584 
   6585 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6586 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6587 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6588 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6589 			    CSR_READ(sc, WMREG_TSCTFC));
   6590 		else {
   6591 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6592 			    CSR_READ(sc, WMREG_CBRMPC));
   6593 		}
   6594 	} else
   6595 		algnerrc = rxerrc = cexterr = 0;
   6596 
   6597 	if (sc->sc_type >= WM_T_82542_2_1) {
   6598 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6599 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6600 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6601 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6602 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6603 	}
   6604 
   6605 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6606 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6607 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6608 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6609 
   6610 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6611 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6612 	}
   6613 
   6614 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6615 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6616 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6617 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6618 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6619 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6620 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6621 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6622 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6623 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6624 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6625 
   6626 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6627 	    CSR_READ(sc, WMREG_GORCL) +
   6628 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6629 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6630 	    CSR_READ(sc, WMREG_GOTCL) +
   6631 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6632 
   6633 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6634 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6635 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6636 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6637 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6638 
   6639 	if (sc->sc_type >= WM_T_82540) {
   6640 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6641 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6642 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6643 	}
   6644 
   6645 	/*
   6646 	 * The TOR(L) register includes:
   6647 	 *  - Error
   6648 	 *  - Flow control
	 *  - Broadcast rejected (this note appears in the 82574 and newer
	 *    datasheets; what does "broadcast rejected" mean?)
   6651 	 */
   6652 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6653 	    CSR_READ(sc, WMREG_TORL) +
   6654 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6655 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6656 	    CSR_READ(sc, WMREG_TOTL) +
   6657 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6658 
   6659 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6660 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6661 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6662 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6663 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6664 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6665 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6666 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6667 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6668 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6669 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6670 	if (sc->sc_type < WM_T_82575) {
   6671 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6672 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6673 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6674 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6675 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6676 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6677 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6678 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6679 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6680 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6681 	} else if (!WM_IS_ICHPCH(sc)) {
   6682 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6683 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6684 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6685 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6686 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6687 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6688 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6689 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6690 
   6691 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6692 		    CSR_READ(sc, WMREG_HGORCL) +
   6693 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6694 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6695 		    CSR_READ(sc, WMREG_HGOTCL) +
   6696 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6697 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6698 	}
   6699 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6700 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6701 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6702 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6703 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6704 			    CSR_READ(sc, WMREG_B2OGPRC));
   6705 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6706 			    CSR_READ(sc, WMREG_O2BSPC));
   6707 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6708 			    CSR_READ(sc, WMREG_B2OSPC));
   6709 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6710 			    CSR_READ(sc, WMREG_O2BGPTC));
   6711 		}
   6712 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6713 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6714 	}
   6715 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6716 	if_statadd_ref(nsr, if_collisions, colc);
   6717 	if_statadd_ref(nsr, if_ierrors,
   6718 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6719 	/*
	 * WMREG_RNBC is incremented when there are no available buffers in
	 * host memory. It does not count dropped packets, because the
	 * Ethernet controller can still receive packets in that case if
	 * there is space in the PHY's FIFO.
	 *
	 * If you want to know the WMREG_RNBC count, use a dedicated EVCNT
	 * instead of if_iqdrops.
   6727 	 */
   6728 	if_statadd_ref(nsr, if_iqdrops, mpc);
   6729 	IF_STAT_PUTREF(ifp);
   6730 }
   6731 
   6732 void
   6733 wm_clear_evcnt(struct wm_softc *sc)
   6734 {
   6735 #ifdef WM_EVENT_COUNTERS
   6736 	int i;
   6737 
   6738 	/* RX queues */
   6739 	for (i = 0; i < sc->sc_nqueues; i++) {
   6740 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6741 
   6742 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6743 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6744 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6745 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6746 	}
   6747 
   6748 	/* TX queues */
   6749 	for (i = 0; i < sc->sc_nqueues; i++) {
   6750 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6751 		int j;
   6752 
   6753 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6754 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6755 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6756 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6757 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6758 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6759 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6760 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6761 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6762 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6763 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6764 
   6765 		for (j = 0; j < WM_NTXSEGS; j++)
   6766 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6767 
   6768 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6769 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6770 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6771 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6772 		if (sc->sc_type <= WM_T_82544)
   6773 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6774 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6775 	}
   6776 
	/* Misc */
   6778 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6779 
   6780 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6781 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6782 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6783 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6784 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6785 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6786 
   6787 	if (sc->sc_type >= WM_T_82543) {
   6788 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6789 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6790 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6791 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6792 		else
   6793 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6794 
   6795 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6796 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6797 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6798 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6799 		else {
   6800 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6801 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6802 		}
   6803 	}
   6804 
   6805 	if (sc->sc_type >= WM_T_82542_2_1) {
   6806 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6807 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6808 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6809 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6810 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6811 	}
   6812 
   6813 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6814 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6815 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6816 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6817 
   6818 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6819 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6820 
   6821 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6822 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6823 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6824 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6825 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6826 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6827 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6828 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6829 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6830 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6831 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6832 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6833 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6834 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6835 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6836 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6837 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6838 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6839 	if (sc->sc_type >= WM_T_82540) {
   6840 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6841 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6842 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6843 	}
   6844 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6845 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6846 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6847 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6848 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6849 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6850 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6851 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6852 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6853 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6854 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6855 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6856 	WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6857 	if (sc->sc_type < WM_T_82575) {
   6858 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6859 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6860 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6861 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6862 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6863 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6864 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6865 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6866 	} else if (!WM_IS_ICHPCH(sc)) {
   6867 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6868 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6869 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6870 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6871 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6872 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6873 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6874 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6875 
   6876 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6877 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6878 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6879 	}
   6880 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6881 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6882 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6883 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6884 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6885 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6886 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6887 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6888 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6889 	}
   6890 #endif
   6891 }
   6892 
   6893 /*
   6894  * wm_init:		[ifnet interface function]
   6895  *
   6896  *	Initialize the interface.
   6897  */
   6898 static int
   6899 wm_init(struct ifnet *ifp)
   6900 {
   6901 	struct wm_softc *sc = ifp->if_softc;
   6902 	int ret;
   6903 
   6904 	KASSERT(IFNET_LOCKED(ifp));
   6905 
   6906 	if (sc->sc_dying)
   6907 		return ENXIO;
   6908 
   6909 	mutex_enter(sc->sc_core_lock);
   6910 	ret = wm_init_locked(ifp);
   6911 	mutex_exit(sc->sc_core_lock);
   6912 
   6913 	return ret;
   6914 }
   6915 
   6916 static int
   6917 wm_init_locked(struct ifnet *ifp)
   6918 {
   6919 	struct wm_softc *sc = ifp->if_softc;
   6920 	struct ethercom *ec = &sc->sc_ethercom;
   6921 	int i, j, trynum, error = 0;
   6922 	uint32_t reg, sfp_mask = 0;
   6923 
   6924 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6925 		device_xname(sc->sc_dev), __func__));
   6926 	KASSERT(IFNET_LOCKED(ifp));
   6927 	KASSERT(mutex_owned(sc->sc_core_lock));
   6928 
   6929 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
   6933 	 * on such platforms.  One possibility is that the DMA itself is
   6934 	 * slightly more efficient if the front of the entire packet (instead
   6935 	 * of the front of the headers) is aligned.
   6936 	 *
   6937 	 * Note we must always set align_tweak to 0 if we are using
   6938 	 * jumbo frames.
   6939 	 */
   6940 #ifdef __NO_STRICT_ALIGNMENT
   6941 	sc->sc_align_tweak = 0;
   6942 #else
   6943 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6944 		sc->sc_align_tweak = 0;
   6945 	else
   6946 		sc->sc_align_tweak = 2;
   6947 #endif /* __NO_STRICT_ALIGNMENT */
   6948 
   6949 	/* Cancel any pending I/O. */
   6950 	wm_stop_locked(ifp, false, false);
   6951 
   6952 	/* Update statistics before reset */
   6953 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6954 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6955 
   6956 	/* >= PCH_SPT hardware workaround before reset. */
   6957 	if (sc->sc_type >= WM_T_PCH_SPT)
   6958 		wm_flush_desc_rings(sc);
   6959 
   6960 	/* Reset the chip to a known state. */
   6961 	wm_reset(sc);
   6962 
   6963 	/*
   6964 	 * AMT based hardware can now take control from firmware
   6965 	 * Do this after reset.
   6966 	 */
   6967 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6968 		wm_get_hw_control(sc);
   6969 
   6970 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6971 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6972 		wm_legacy_irq_quirk_spt(sc);
   6973 
   6974 	/* Init hardware bits */
   6975 	wm_initialize_hardware_bits(sc);
   6976 
   6977 	/* Reset the PHY. */
   6978 	if (sc->sc_flags & WM_F_HAS_MII)
   6979 		wm_gmii_reset(sc);
   6980 
   6981 	if (sc->sc_type >= WM_T_ICH8) {
   6982 		reg = CSR_READ(sc, WMREG_GCR);
   6983 		/*
   6984 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6985 		 * default after reset.
   6986 		 */
   6987 		if (sc->sc_type == WM_T_ICH8)
   6988 			reg |= GCR_NO_SNOOP_ALL;
   6989 		else
   6990 			reg &= ~GCR_NO_SNOOP_ALL;
   6991 		CSR_WRITE(sc, WMREG_GCR, reg);
   6992 	}
   6993 
   6994 	if ((sc->sc_type >= WM_T_ICH8)
   6995 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6996 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6997 
   6998 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6999 		reg |= CTRL_EXT_RO_DIS;
   7000 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7001 	}
   7002 
   7003 	/* Calculate (E)ITR value */
   7004 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7005 		/*
		 * For NEWQUEUE controllers' EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as the older controllers' ITR because the
		 * interrupts/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as ITR.
		 *
		 * For N interrupts/sec, set this value to 1,000,000 / N,
		 * in contrast to the ITR throttling value.
   7015 		 */
   7016 		sc->sc_itr_init = 450;
   7017 	} else if (sc->sc_type >= WM_T_82543) {
   7018 		/*
   7019 		 * Set up the interrupt throttling register (units of 256ns)
   7020 		 * Note that a footnote in Intel's documentation says this
   7021 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7022 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7023 		 * that that is also true for the 1024ns units of the other
   7024 		 * interrupt-related timer registers -- so, really, we ought
   7025 		 * to divide this value by 4 when the link speed is low.
   7026 		 *
   7027 		 * XXX implement this division at link speed change!
   7028 		 */
   7029 
   7030 		/*
   7031 		 * For N interrupts/sec, set this value to:
   7032 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7033 		 * absolute and packet timer values to this value
   7034 		 * divided by 4 to get "simple timer" behavior.
   7035 		 */
   7036 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   7037 	}
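
	/*
	 * Sanity check of the numbers above (illustrative): an ITR value
	 * of 1500 in 256 ns units is an interval of 384,000 ns, i.e.
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec; the EITR
	 * default of 450 gives roughly 1,000,000 / 450 ~= 2222
	 * interrupts/sec.
	 */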
   7038 
   7039 	error = wm_init_txrx_queues(sc);
   7040 	if (error)
   7041 		goto out;
   7042 
   7043 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7044 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7045 	    (sc->sc_type >= WM_T_82575))
   7046 		wm_serdes_power_up_link_82575(sc);
   7047 
   7048 	/* Clear out the VLAN table -- we don't use it (yet). */
   7049 	CSR_WRITE(sc, WMREG_VET, 0);
   7050 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7051 		trynum = 10; /* Due to hw errata */
   7052 	else
   7053 		trynum = 1;
   7054 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7055 		for (j = 0; j < trynum; j++)
   7056 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7057 
   7058 	/*
   7059 	 * Set up flow-control parameters.
   7060 	 *
   7061 	 * XXX Values could probably stand some tuning.
   7062 	 */
   7063 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7064 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7065 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   7066 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   7067 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7068 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7069 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7070 	}
   7071 
   7072 	sc->sc_fcrtl = FCRTL_DFLT;
   7073 	if (sc->sc_type < WM_T_82543) {
   7074 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7075 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7076 	} else {
   7077 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7078 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7079 	}
   7080 
   7081 	if (sc->sc_type == WM_T_80003)
   7082 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7083 	else
   7084 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7085 
   7086 	/* Writes the control register. */
   7087 	wm_set_vlan(sc);
   7088 
   7089 	if (sc->sc_flags & WM_F_HAS_MII) {
   7090 		uint16_t kmreg;
   7091 
   7092 		switch (sc->sc_type) {
   7093 		case WM_T_80003:
   7094 		case WM_T_ICH8:
   7095 		case WM_T_ICH9:
   7096 		case WM_T_ICH10:
   7097 		case WM_T_PCH:
   7098 		case WM_T_PCH2:
   7099 		case WM_T_PCH_LPT:
   7100 		case WM_T_PCH_SPT:
   7101 		case WM_T_PCH_CNP:
   7102 			/*
			 * Set the MAC to wait the maximum time between each
			 * iteration and increase the maximum number of
			 * iterations when polling the PHY; this fixes
			 * erroneous timeouts at 10Mbps.
   7107 			 */
   7108 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7109 			    0xFFFF);
   7110 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7111 			    &kmreg);
   7112 			kmreg |= 0x3F;
   7113 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7114 			    kmreg);
   7115 			break;
   7116 		default:
   7117 			break;
   7118 		}
   7119 
   7120 		if (sc->sc_type == WM_T_80003) {
   7121 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7122 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7123 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7124 
   7125 			/* Bypass RX and TX FIFOs */
   7126 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7127 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7128 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7129 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7130 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7131 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7132 		}
   7133 	}
   7134 #if 0
   7135 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7136 #endif
   7137 
   7138 	/* Set up checksum offload parameters. */
   7139 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7140 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7141 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7142 		reg |= RXCSUM_IPOFL;
   7143 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7144 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7145 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7146 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7147 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7148 
   7149 	/* Set registers about MSI-X */
   7150 	if (wm_is_using_msix(sc)) {
   7151 		uint32_t ivar, qintr_idx;
   7152 		struct wm_queue *wmq;
   7153 		unsigned int qid;
   7154 
   7155 		if (sc->sc_type == WM_T_82575) {
   7156 			/* Interrupt control */
   7157 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7158 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7159 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7160 
   7161 			/* TX and RX */
   7162 			for (i = 0; i < sc->sc_nqueues; i++) {
   7163 				wmq = &sc->sc_queue[i];
   7164 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7165 				    EITR_TX_QUEUE(wmq->wmq_id)
   7166 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7167 			}
   7168 			/* Link status */
   7169 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7170 			    EITR_OTHER);
   7171 		} else if (sc->sc_type == WM_T_82574) {
   7172 			/* Interrupt control */
   7173 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7174 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7175 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7176 
   7177 			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
   7182 			 */
   7183 			reg = CSR_READ(sc, WMREG_RFCTL);
   7184 			reg |= WMREG_RFCTL_ACKDIS;
   7185 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7186 
   7187 			ivar = 0;
   7188 			/* TX and RX */
   7189 			for (i = 0; i < sc->sc_nqueues; i++) {
   7190 				wmq = &sc->sc_queue[i];
   7191 				qid = wmq->wmq_id;
   7192 				qintr_idx = wmq->wmq_intr_idx;
   7193 
   7194 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7195 				    IVAR_TX_MASK_Q_82574(qid));
   7196 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7197 				    IVAR_RX_MASK_Q_82574(qid));
   7198 			}
   7199 			/* Link status */
   7200 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7201 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7202 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7203 		} else {
   7204 			/* Interrupt control */
   7205 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7206 			    | GPIE_EIAME | GPIE_PBA);
   7207 
   7208 			switch (sc->sc_type) {
   7209 			case WM_T_82580:
   7210 			case WM_T_I350:
   7211 			case WM_T_I354:
   7212 			case WM_T_I210:
   7213 			case WM_T_I211:
   7214 				/* TX and RX */
   7215 				for (i = 0; i < sc->sc_nqueues; i++) {
   7216 					wmq = &sc->sc_queue[i];
   7217 					qid = wmq->wmq_id;
   7218 					qintr_idx = wmq->wmq_intr_idx;
   7219 
   7220 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7221 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7222 					ivar |= __SHIFTIN((qintr_idx
   7223 						| IVAR_VALID),
   7224 					    IVAR_TX_MASK_Q(qid));
   7225 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7226 					ivar |= __SHIFTIN((qintr_idx
   7227 						| IVAR_VALID),
   7228 					    IVAR_RX_MASK_Q(qid));
   7229 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7230 				}
   7231 				break;
   7232 			case WM_T_82576:
   7233 				/* TX and RX */
   7234 				for (i = 0; i < sc->sc_nqueues; i++) {
   7235 					wmq = &sc->sc_queue[i];
   7236 					qid = wmq->wmq_id;
   7237 					qintr_idx = wmq->wmq_intr_idx;
   7238 
   7239 					ivar = CSR_READ(sc,
   7240 					    WMREG_IVAR_Q_82576(qid));
   7241 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7242 					ivar |= __SHIFTIN((qintr_idx
   7243 						| IVAR_VALID),
   7244 					    IVAR_TX_MASK_Q_82576(qid));
   7245 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7246 					ivar |= __SHIFTIN((qintr_idx
   7247 						| IVAR_VALID),
   7248 					    IVAR_RX_MASK_Q_82576(qid));
   7249 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7250 					    ivar);
   7251 				}
   7252 				break;
   7253 			default:
   7254 				break;
   7255 			}
   7256 
   7257 			/* Link status */
   7258 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7259 			    IVAR_MISC_OTHER);
   7260 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7261 		}
   7262 
   7263 		if (wm_is_using_multiqueue(sc)) {
   7264 			wm_init_rss(sc);
   7265 
			/*
			 * NOTE: Receive full-packet checksum offload is
			 * mutually exclusive with multiqueue. However, this
			 * is not the same as the TCP/IP checksum offloads,
			 * which still work.
			 */
   7272 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7273 			reg |= RXCSUM_PCSD;
   7274 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7275 		}
   7276 	}
   7277 
   7278 	/* Set up the interrupt registers. */
   7279 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7280 
   7281 	/* Enable SFP module insertion interrupt if it's required */
   7282 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7283 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7284 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7285 		sfp_mask = ICR_GPI(0);
   7286 	}
   7287 
   7288 	if (wm_is_using_msix(sc)) {
   7289 		uint32_t mask;
   7290 		struct wm_queue *wmq;
   7291 
   7292 		switch (sc->sc_type) {
   7293 		case WM_T_82574:
   7294 			mask = 0;
   7295 			for (i = 0; i < sc->sc_nqueues; i++) {
   7296 				wmq = &sc->sc_queue[i];
   7297 				mask |= ICR_TXQ(wmq->wmq_id);
   7298 				mask |= ICR_RXQ(wmq->wmq_id);
   7299 			}
   7300 			mask |= ICR_OTHER;
   7301 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7302 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7303 			break;
   7304 		default:
   7305 			if (sc->sc_type == WM_T_82575) {
   7306 				mask = 0;
   7307 				for (i = 0; i < sc->sc_nqueues; i++) {
   7308 					wmq = &sc->sc_queue[i];
   7309 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7310 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7311 				}
   7312 				mask |= EITR_OTHER;
   7313 			} else {
   7314 				mask = 0;
   7315 				for (i = 0; i < sc->sc_nqueues; i++) {
   7316 					wmq = &sc->sc_queue[i];
   7317 					mask |= 1 << wmq->wmq_intr_idx;
   7318 				}
   7319 				mask |= 1 << sc->sc_link_intr_idx;
   7320 			}
   7321 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7322 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7323 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7324 
   7325 			/* For other interrupts */
   7326 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7327 			break;
   7328 		}
   7329 	} else {
   7330 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7331 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7332 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7333 	}
   7334 
   7335 	/* Set up the inter-packet gap. */
   7336 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7337 
   7338 	if (sc->sc_type >= WM_T_82543) {
   7339 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7340 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7341 			wm_itrs_writereg(sc, wmq);
   7342 		}
   7343 		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
		 * does.
   7348 		 */
   7349 	}
   7350 
   7351 	/* Set the VLAN EtherType. */
   7352 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7353 
   7354 	/*
   7355 	 * Set up the transmit control register; we start out with
   7356 	 * a collision distance suitable for FDX, but update it when
   7357 	 * we resolve the media type.
   7358 	 */
   7359 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7360 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7361 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7362 	if (sc->sc_type >= WM_T_82571)
   7363 		sc->sc_tctl |= TCTL_MULR;
   7364 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7365 
   7366 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
   7368 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7369 	}
   7370 
   7371 	if (sc->sc_type == WM_T_80003) {
   7372 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7373 		reg &= ~TCTL_EXT_GCEX_MASK;
   7374 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7375 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7376 	}
   7377 
   7378 	/* Set the media. */
   7379 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7380 		goto out;
   7381 
   7382 	/* Configure for OS presence */
   7383 	wm_init_manageability(sc);
   7384 
   7385 	/*
   7386 	 * Set up the receive control register; we actually program the
   7387 	 * register when we set the receive filter. Use multicast address
   7388 	 * offset type 0.
   7389 	 *
   7390 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7391 	 * don't enable that feature.
   7392 	 */
   7393 	sc->sc_mchash_type = 0;
   7394 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7395 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7396 
	/* The 82574 uses the one-buffer extended RX descriptor. */
   7398 	if (sc->sc_type == WM_T_82574)
   7399 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7400 
   7401 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7402 		sc->sc_rctl |= RCTL_SECRC;
   7403 
   7404 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7405 	    && (ifp->if_mtu > ETHERMTU)) {
   7406 		sc->sc_rctl |= RCTL_LPE;
   7407 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7408 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7409 	}
   7410 
   7411 	if (MCLBYTES == 2048)
   7412 		sc->sc_rctl |= RCTL_2k;
   7413 	else {
   7414 		if (sc->sc_type >= WM_T_82543) {
   7415 			switch (MCLBYTES) {
   7416 			case 4096:
   7417 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7418 				break;
   7419 			case 8192:
   7420 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7421 				break;
   7422 			case 16384:
   7423 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7424 				break;
   7425 			default:
   7426 				panic("wm_init: MCLBYTES %d unsupported",
   7427 				    MCLBYTES);
   7428 				break;
   7429 			}
   7430 		} else
   7431 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7432 	}
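
	/*
	 * For illustration: with NetBSD's usual MCLBYTES of 2048, RCTL_2k
	 * selects 2 KB receive buffers, matching the mbuf cluster size so
	 * that each RX descriptor maps exactly one cluster (see
	 * wm_add_rxbuf()).
	 */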
   7433 
   7434 	/* Enable ECC */
   7435 	switch (sc->sc_type) {
   7436 	case WM_T_82571:
   7437 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7438 		reg |= PBA_ECC_CORR_EN;
   7439 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7440 		break;
   7441 	case WM_T_PCH_LPT:
   7442 	case WM_T_PCH_SPT:
   7443 	case WM_T_PCH_CNP:
   7444 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7445 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7446 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7447 
   7448 		sc->sc_ctrl |= CTRL_MEHE;
   7449 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7450 		break;
   7451 	default:
   7452 		break;
   7453 	}
   7454 
   7455 	/*
   7456 	 * Set the receive filter.
   7457 	 *
   7458 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7459 	 * the setting of RCTL.EN in wm_set_filter()
   7460 	 */
   7461 	wm_set_filter(sc);
   7462 
	/* On 82575 and later, set RDT only if RX is enabled. */
   7464 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7465 		int qidx;
   7466 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7467 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7468 			for (i = 0; i < WM_NRXDESC; i++) {
   7469 				mutex_enter(rxq->rxq_lock);
   7470 				wm_init_rxdesc(rxq, i);
   7471 				mutex_exit(rxq->rxq_lock);
   7472 
   7473 			}
   7474 		}
   7475 	}
   7476 
   7477 	wm_unset_stopping_flags(sc);
   7478 
   7479 	/* Start the one second link check clock. */
   7480 	callout_schedule(&sc->sc_tick_ch, hz);
   7481 
   7482 	/*
   7483 	 * ...all done! (IFNET_LOCKED asserted above.)
   7484 	 */
   7485 	ifp->if_flags |= IFF_RUNNING;
   7486 
   7487 out:
   7488 	/* Save last flags for the callback */
   7489 	sc->sc_if_flags = ifp->if_flags;
   7490 	sc->sc_ec_capenable = ec->ec_capenable;
   7491 	if (error)
   7492 		log(LOG_ERR, "%s: interface not running\n",
   7493 		    device_xname(sc->sc_dev));
   7494 	return error;
   7495 }
   7496 
   7497 /*
   7498  * wm_stop:		[ifnet interface function]
   7499  *
   7500  *	Stop transmission on the interface.
   7501  */
   7502 static void
   7503 wm_stop(struct ifnet *ifp, int disable)
   7504 {
   7505 	struct wm_softc *sc = ifp->if_softc;
   7506 
   7507 	ASSERT_SLEEPABLE();
   7508 	KASSERT(IFNET_LOCKED(ifp));
   7509 
   7510 	mutex_enter(sc->sc_core_lock);
   7511 	wm_stop_locked(ifp, disable ? true : false, true);
   7512 	mutex_exit(sc->sc_core_lock);
   7513 
   7514 	/*
   7515 	 * After wm_set_stopping_flags(), it is guaranteed that
   7516 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7517 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7518 	 * because it can sleep,
    7519 	 * so call workqueue_wait() here instead.
   7520 	 */
   7521 	for (int i = 0; i < sc->sc_nqueues; i++)
   7522 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7523 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7524 }
   7525 
   7526 static void
   7527 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7528 {
   7529 	struct wm_softc *sc = ifp->if_softc;
   7530 	struct wm_txsoft *txs;
   7531 	int i, qidx;
   7532 
   7533 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7534 		device_xname(sc->sc_dev), __func__));
   7535 	KASSERT(IFNET_LOCKED(ifp));
   7536 	KASSERT(mutex_owned(sc->sc_core_lock));
   7537 
   7538 	wm_set_stopping_flags(sc);
   7539 
   7540 	if (sc->sc_flags & WM_F_HAS_MII) {
   7541 		/* Down the MII. */
   7542 		mii_down(&sc->sc_mii);
   7543 	} else {
   7544 #if 0
   7545 		/* Should we clear PHY's status properly? */
   7546 		wm_reset(sc);
   7547 #endif
   7548 	}
   7549 
   7550 	/* Stop the transmit and receive processes. */
   7551 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7552 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7553 	sc->sc_rctl &= ~RCTL_EN;
   7554 
   7555 	/*
   7556 	 * Clear the interrupt mask to ensure the device cannot assert its
   7557 	 * interrupt line.
   7558 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7559 	 * service any currently pending or shared interrupt.
   7560 	 */
   7561 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7562 	sc->sc_icr = 0;
   7563 	if (wm_is_using_msix(sc)) {
   7564 		if (sc->sc_type != WM_T_82574) {
   7565 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7566 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7567 		} else
   7568 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7569 	}
   7570 
   7571 	/*
   7572 	 * Stop callouts after interrupts are disabled; if we have
   7573 	 * to wait for them, we will be releasing the CORE_LOCK
   7574 	 * briefly, which will unblock interrupts on the current CPU.
   7575 	 */
   7576 
   7577 	/* Stop the one second clock. */
   7578 	if (wait)
   7579 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7580 	else
   7581 		callout_stop(&sc->sc_tick_ch);
   7582 
   7583 	/* Stop the 82547 Tx FIFO stall check timer. */
   7584 	if (sc->sc_type == WM_T_82547) {
   7585 		if (wait)
   7586 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7587 		else
   7588 			callout_stop(&sc->sc_txfifo_ch);
   7589 	}
   7590 
   7591 	/* Release any queued transmit buffers. */
   7592 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7593 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7594 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7595 		struct mbuf *m;
   7596 
   7597 		mutex_enter(txq->txq_lock);
   7598 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7599 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7600 			txs = &txq->txq_soft[i];
   7601 			if (txs->txs_mbuf != NULL) {
    7602 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7603 				m_freem(txs->txs_mbuf);
   7604 				txs->txs_mbuf = NULL;
   7605 			}
   7606 		}
   7607 		/* Drain txq_interq */
   7608 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7609 			m_freem(m);
   7610 		mutex_exit(txq->txq_lock);
   7611 	}
   7612 
   7613 	/* Mark the interface as down and cancel the watchdog timer. */
   7614 	ifp->if_flags &= ~IFF_RUNNING;
   7615 	sc->sc_if_flags = ifp->if_flags;
   7616 
   7617 	if (disable) {
   7618 		for (i = 0; i < sc->sc_nqueues; i++) {
   7619 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7620 			mutex_enter(rxq->rxq_lock);
   7621 			wm_rxdrain(rxq);
   7622 			mutex_exit(rxq->rxq_lock);
   7623 		}
   7624 	}
   7625 
   7626 #if 0 /* notyet */
   7627 	if (sc->sc_type >= WM_T_82544)
   7628 		CSR_WRITE(sc, WMREG_WUC, 0);
   7629 #endif
   7630 }
   7631 
   7632 static void
   7633 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7634 {
   7635 	struct mbuf *m;
   7636 	int i;
   7637 
   7638 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7639 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7640 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7641 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7642 		    m->m_data, m->m_len, m->m_flags);
   7643 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7644 	    i, i == 1 ? "" : "s");
   7645 }
   7646 
   7647 /*
   7648  * wm_82547_txfifo_stall:
   7649  *
   7650  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7651  *	reset the FIFO pointers, and restart packet transmission.
   7652  */
   7653 static void
   7654 wm_82547_txfifo_stall(void *arg)
   7655 {
   7656 	struct wm_softc *sc = arg;
   7657 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7658 
   7659 	mutex_enter(txq->txq_lock);
   7660 
   7661 	if (txq->txq_stopping)
   7662 		goto out;
   7663 
   7664 	if (txq->txq_fifo_stall) {
   7665 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7666 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7667 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7668 			/*
   7669 			 * Packets have drained.  Stop transmitter, reset
   7670 			 * FIFO pointers, restart transmitter, and kick
   7671 			 * the packet queue.
   7672 			 */
   7673 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7674 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7675 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7676 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7677 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7678 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7679 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7680 			CSR_WRITE_FLUSH(sc);
   7681 
   7682 			txq->txq_fifo_head = 0;
   7683 			txq->txq_fifo_stall = 0;
   7684 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7685 		} else {
   7686 			/*
   7687 			 * Still waiting for packets to drain; try again in
   7688 			 * another tick.
   7689 			 */
   7690 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7691 		}
   7692 	}
   7693 
   7694 out:
   7695 	mutex_exit(txq->txq_lock);
   7696 }
   7697 
   7698 /*
   7699  * wm_82547_txfifo_bugchk:
   7700  *
   7701  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7702  *	prevent enqueueing a packet that would wrap around the end
    7703  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   7704  *
   7705  *	We do this by checking the amount of space before the end
   7706  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7707  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7708  *	the internal FIFO pointers to the beginning, and restart
   7709  *	transmission on the interface.
   7710  */
   7711 #define	WM_FIFO_HDR		0x10
   7712 #define	WM_82547_PAD_LEN	0x3e0
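         /*
          * Worked example of the check below: a 1514-byte frame occupies
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO
          * space.  With WM_82547_PAD_LEN == 0x3e0 (992), the stall path is
          * taken once no more than 1536 - 992 = 544 bytes remain before the
          * end of the FIFO, i.e. when space <= len - WM_82547_PAD_LEN.
          */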
   7713 static int
   7714 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7715 {
   7716 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7717 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7718 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7719 
   7720 	/* Just return if already stalled. */
   7721 	if (txq->txq_fifo_stall)
   7722 		return 1;
   7723 
   7724 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7725 		/* Stall only occurs in half-duplex mode. */
   7726 		goto send_packet;
   7727 	}
   7728 
   7729 	if (len >= WM_82547_PAD_LEN + space) {
   7730 		txq->txq_fifo_stall = 1;
   7731 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7732 		return 1;
   7733 	}
   7734 
   7735 send_packet:
   7736 	txq->txq_fifo_head += len;
   7737 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7738 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7739 
   7740 	return 0;
   7741 }
   7742 
   7743 static int
   7744 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7745 {
   7746 	int error;
   7747 
   7748 	/*
   7749 	 * Allocate the control data structures, and create and load the
   7750 	 * DMA map for it.
   7751 	 *
   7752 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7753 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7754 	 * both sets within the same 4G segment.
   7755 	 */
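         	/*
         	 * The 4G constraint is enforced by the bus_dmamem_alloc()
         	 * boundary argument (0x100000000) below, which prevents the
         	 * allocation from crossing a 4GB boundary.
         	 */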
   7756 	if (sc->sc_type < WM_T_82544)
   7757 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7758 	else
   7759 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7760 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7761 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7762 	else
   7763 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7764 
   7765 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7766 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7767 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7768 		aprint_error_dev(sc->sc_dev,
   7769 		    "unable to allocate TX control data, error = %d\n",
   7770 		    error);
   7771 		goto fail_0;
   7772 	}
   7773 
   7774 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7775 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7776 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7777 		aprint_error_dev(sc->sc_dev,
   7778 		    "unable to map TX control data, error = %d\n", error);
   7779 		goto fail_1;
   7780 	}
   7781 
   7782 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7783 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7784 		aprint_error_dev(sc->sc_dev,
   7785 		    "unable to create TX control data DMA map, error = %d\n",
   7786 		    error);
   7787 		goto fail_2;
   7788 	}
   7789 
   7790 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7791 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7792 		aprint_error_dev(sc->sc_dev,
   7793 		    "unable to load TX control data DMA map, error = %d\n",
   7794 		    error);
   7795 		goto fail_3;
   7796 	}
   7797 
   7798 	return 0;
   7799 
   7800 fail_3:
   7801 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7802 fail_2:
   7803 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7804 	    WM_TXDESCS_SIZE(txq));
   7805 fail_1:
   7806 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7807 fail_0:
   7808 	return error;
   7809 }
   7810 
   7811 static void
   7812 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7813 {
   7814 
   7815 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7816 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7817 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7818 	    WM_TXDESCS_SIZE(txq));
   7819 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7820 }
   7821 
   7822 static int
   7823 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7824 {
   7825 	int error;
   7826 	size_t rxq_descs_size;
   7827 
   7828 	/*
   7829 	 * Allocate the control data structures, and create and load the
   7830 	 * DMA map for it.
   7831 	 *
   7832 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7833 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7834 	 * both sets within the same 4G segment.
   7835 	 */
   7836 	rxq->rxq_ndesc = WM_NRXDESC;
   7837 	if (sc->sc_type == WM_T_82574)
   7838 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7839 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7840 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7841 	else
   7842 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7843 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7844 
   7845 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7846 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7847 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7848 		aprint_error_dev(sc->sc_dev,
   7849 		    "unable to allocate RX control data, error = %d\n",
   7850 		    error);
   7851 		goto fail_0;
   7852 	}
   7853 
   7854 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7855 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7856 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7857 		aprint_error_dev(sc->sc_dev,
   7858 		    "unable to map RX control data, error = %d\n", error);
   7859 		goto fail_1;
   7860 	}
   7861 
   7862 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7863 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7864 		aprint_error_dev(sc->sc_dev,
   7865 		    "unable to create RX control data DMA map, error = %d\n",
   7866 		    error);
   7867 		goto fail_2;
   7868 	}
   7869 
   7870 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7871 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7872 		aprint_error_dev(sc->sc_dev,
   7873 		    "unable to load RX control data DMA map, error = %d\n",
   7874 		    error);
   7875 		goto fail_3;
   7876 	}
   7877 
   7878 	return 0;
   7879 
   7880  fail_3:
   7881 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7882  fail_2:
   7883 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7884 	    rxq_descs_size);
   7885  fail_1:
   7886 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7887  fail_0:
   7888 	return error;
   7889 }
   7890 
   7891 static void
   7892 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7893 {
   7894 
   7895 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7896 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7897 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7898 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7899 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7900 }
   7901 
   7903 static int
   7904 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7905 {
   7906 	int i, error;
   7907 
   7908 	/* Create the transmit buffer DMA maps. */
   7909 	WM_TXQUEUELEN(txq) =
   7910 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7911 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7912 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7913 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7914 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7915 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7916 			aprint_error_dev(sc->sc_dev,
   7917 			    "unable to create Tx DMA map %d, error = %d\n",
   7918 			    i, error);
   7919 			goto fail;
   7920 		}
   7921 	}
   7922 
   7923 	return 0;
   7924 
   7925 fail:
   7926 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7927 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7928 			bus_dmamap_destroy(sc->sc_dmat,
   7929 			    txq->txq_soft[i].txs_dmamap);
   7930 	}
   7931 	return error;
   7932 }
   7933 
   7934 static void
   7935 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7936 {
   7937 	int i;
   7938 
   7939 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7940 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7941 			bus_dmamap_destroy(sc->sc_dmat,
   7942 			    txq->txq_soft[i].txs_dmamap);
   7943 	}
   7944 }
   7945 
   7946 static int
   7947 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7948 {
   7949 	int i, error;
   7950 
   7951 	/* Create the receive buffer DMA maps. */
   7952 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7953 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7954 			    MCLBYTES, 0, 0,
   7955 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7956 			aprint_error_dev(sc->sc_dev,
   7957 			    "unable to create Rx DMA map %d error = %d\n",
   7958 			    i, error);
   7959 			goto fail;
   7960 		}
   7961 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7962 	}
   7963 
   7964 	return 0;
   7965 
   7966  fail:
   7967 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7968 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7969 			bus_dmamap_destroy(sc->sc_dmat,
   7970 			    rxq->rxq_soft[i].rxs_dmamap);
   7971 	}
   7972 	return error;
   7973 }
   7974 
   7975 static void
   7976 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7977 {
   7978 	int i;
   7979 
   7980 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7981 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7982 			bus_dmamap_destroy(sc->sc_dmat,
   7983 			    rxq->rxq_soft[i].rxs_dmamap);
   7984 	}
   7985 }
   7986 
   7987 /*
    7988  * wm_alloc_txrx_queues:
    7989  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   7990  */
   7991 static int
   7992 wm_alloc_txrx_queues(struct wm_softc *sc)
   7993 {
   7994 	int i, error, tx_done, rx_done;
   7995 
   7996 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7997 	    KM_SLEEP);
   7998 	if (sc->sc_queue == NULL) {
    7999 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   8000 		error = ENOMEM;
   8001 		goto fail_0;
   8002 	}
   8003 
   8004 	/* For transmission */
   8005 	error = 0;
   8006 	tx_done = 0;
   8007 	for (i = 0; i < sc->sc_nqueues; i++) {
   8008 #ifdef WM_EVENT_COUNTERS
   8009 		int j;
   8010 		const char *xname;
   8011 #endif
   8012 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8013 		txq->txq_sc = sc;
   8014 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8015 
   8016 		error = wm_alloc_tx_descs(sc, txq);
   8017 		if (error)
   8018 			break;
   8019 		error = wm_alloc_tx_buffer(sc, txq);
   8020 		if (error) {
   8021 			wm_free_tx_descs(sc, txq);
   8022 			break;
   8023 		}
   8024 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8025 		if (txq->txq_interq == NULL) {
   8026 			wm_free_tx_descs(sc, txq);
   8027 			wm_free_tx_buffer(sc, txq);
   8028 			error = ENOMEM;
   8029 			break;
   8030 		}
   8031 
   8032 #ifdef WM_EVENT_COUNTERS
   8033 		xname = device_xname(sc->sc_dev);
   8034 
   8035 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8036 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8037 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8038 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8039 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8040 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8041 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8042 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8043 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8044 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8045 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8046 
   8047 		for (j = 0; j < WM_NTXSEGS; j++) {
   8048 			snprintf(txq->txq_txseg_evcnt_names[j],
   8049 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8050 			    "txq%02dtxseg%d", i, j);
   8051 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8052 			    EVCNT_TYPE_MISC,
   8053 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8054 		}
   8055 
   8056 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8057 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8058 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8059 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8060 		/* Only for 82544 (and earlier?) */
   8061 		if (sc->sc_type <= WM_T_82544)
   8062 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8063 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8064 #endif /* WM_EVENT_COUNTERS */
   8065 
   8066 		tx_done++;
   8067 	}
   8068 	if (error)
   8069 		goto fail_1;
   8070 
   8071 	/* For receive */
   8072 	error = 0;
   8073 	rx_done = 0;
   8074 	for (i = 0; i < sc->sc_nqueues; i++) {
   8075 #ifdef WM_EVENT_COUNTERS
   8076 		const char *xname;
   8077 #endif
   8078 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8079 		rxq->rxq_sc = sc;
   8080 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8081 
   8082 		error = wm_alloc_rx_descs(sc, rxq);
   8083 		if (error)
   8084 			break;
   8085 
   8086 		error = wm_alloc_rx_buffer(sc, rxq);
   8087 		if (error) {
   8088 			wm_free_rx_descs(sc, rxq);
   8089 			break;
   8090 		}
   8091 
   8092 #ifdef WM_EVENT_COUNTERS
   8093 		xname = device_xname(sc->sc_dev);
   8094 
   8095 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8096 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8097 
   8098 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8099 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8100 #endif /* WM_EVENT_COUNTERS */
   8101 
   8102 		rx_done++;
   8103 	}
   8104 	if (error)
   8105 		goto fail_2;
   8106 
   8107 	return 0;
   8108 
   8109 fail_2:
   8110 	for (i = 0; i < rx_done; i++) {
   8111 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8112 		wm_free_rx_buffer(sc, rxq);
   8113 		wm_free_rx_descs(sc, rxq);
   8114 		if (rxq->rxq_lock)
   8115 			mutex_obj_free(rxq->rxq_lock);
   8116 	}
   8117 fail_1:
   8118 	for (i = 0; i < tx_done; i++) {
   8119 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8120 		pcq_destroy(txq->txq_interq);
   8121 		wm_free_tx_buffer(sc, txq);
   8122 		wm_free_tx_descs(sc, txq);
   8123 		if (txq->txq_lock)
   8124 			mutex_obj_free(txq->txq_lock);
   8125 	}
   8126 
   8127 	kmem_free(sc->sc_queue,
   8128 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8129 fail_0:
   8130 	return error;
   8131 }
   8132 
   8133 /*
    8134  * wm_free_txrx_queues:
    8135  *	Free {tx,rx} descriptors and {tx,rx} buffers
   8136  */
   8137 static void
   8138 wm_free_txrx_queues(struct wm_softc *sc)
   8139 {
   8140 	int i;
   8141 
   8142 	for (i = 0; i < sc->sc_nqueues; i++) {
   8143 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8144 
   8145 #ifdef WM_EVENT_COUNTERS
   8146 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8147 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8148 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8149 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8150 #endif /* WM_EVENT_COUNTERS */
   8151 
   8152 		wm_free_rx_buffer(sc, rxq);
   8153 		wm_free_rx_descs(sc, rxq);
   8154 		if (rxq->rxq_lock)
   8155 			mutex_obj_free(rxq->rxq_lock);
   8156 	}
   8157 
   8158 	for (i = 0; i < sc->sc_nqueues; i++) {
   8159 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8160 		struct mbuf *m;
   8161 #ifdef WM_EVENT_COUNTERS
   8162 		int j;
   8163 
   8164 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8165 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8166 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8167 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8168 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8169 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8170 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8171 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8172 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8173 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8174 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8175 
   8176 		for (j = 0; j < WM_NTXSEGS; j++)
   8177 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8178 
   8179 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8180 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8181 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8182 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8183 		if (sc->sc_type <= WM_T_82544)
   8184 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8185 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8186 #endif /* WM_EVENT_COUNTERS */
   8187 
   8188 		/* Drain txq_interq */
   8189 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8190 			m_freem(m);
   8191 		pcq_destroy(txq->txq_interq);
   8192 
   8193 		wm_free_tx_buffer(sc, txq);
   8194 		wm_free_tx_descs(sc, txq);
   8195 		if (txq->txq_lock)
   8196 			mutex_obj_free(txq->txq_lock);
   8197 	}
   8198 
   8199 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8200 }
   8201 
   8202 static void
   8203 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8204 {
   8205 
   8206 	KASSERT(mutex_owned(txq->txq_lock));
   8207 
   8208 	/* Initialize the transmit descriptor ring. */
   8209 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8210 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8211 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8212 	txq->txq_free = WM_NTXDESC(txq);
   8213 	txq->txq_next = 0;
   8214 }
   8215 
   8216 static void
   8217 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8218     struct wm_txqueue *txq)
   8219 {
   8220 
   8221 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8222 		device_xname(sc->sc_dev), __func__));
   8223 	KASSERT(mutex_owned(txq->txq_lock));
   8224 
   8225 	if (sc->sc_type < WM_T_82543) {
   8226 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8227 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8228 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8229 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8230 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8231 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8232 	} else {
   8233 		int qid = wmq->wmq_id;
   8234 
   8235 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8236 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8237 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8238 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8239 
   8240 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8241 			/*
   8242 			 * Don't write TDT before TCTL.EN is set.
    8243 			 * See the documentation.
   8244 			 */
   8245 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8246 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8247 			    | TXDCTL_WTHRESH(0));
   8248 		else {
   8249 			/* XXX should update with AIM? */
   8250 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8251 			if (sc->sc_type >= WM_T_82540) {
   8252 				/* Should be the same */
   8253 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8254 			}
   8255 
   8256 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8257 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8258 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8259 		}
   8260 	}
   8261 }
   8262 
   8263 static void
   8264 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8265 {
   8266 	int i;
   8267 
   8268 	KASSERT(mutex_owned(txq->txq_lock));
   8269 
   8270 	/* Initialize the transmit job descriptors. */
   8271 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8272 		txq->txq_soft[i].txs_mbuf = NULL;
   8273 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8274 	txq->txq_snext = 0;
   8275 	txq->txq_sdirty = 0;
   8276 }
   8277 
   8278 static void
   8279 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8280     struct wm_txqueue *txq)
   8281 {
   8282 
   8283 	KASSERT(mutex_owned(txq->txq_lock));
   8284 
   8285 	/*
   8286 	 * Set up some register offsets that are different between
   8287 	 * the i82542 and the i82543 and later chips.
   8288 	 */
   8289 	if (sc->sc_type < WM_T_82543)
   8290 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8291 	else
   8292 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8293 
   8294 	wm_init_tx_descs(sc, txq);
   8295 	wm_init_tx_regs(sc, wmq, txq);
   8296 	wm_init_tx_buffer(sc, txq);
   8297 
    8298 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   8299 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8300 
   8301 	txq->txq_sending = false;
   8302 }
   8303 
   8304 static void
   8305 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8306     struct wm_rxqueue *rxq)
   8307 {
   8308 
   8309 	KASSERT(mutex_owned(rxq->rxq_lock));
   8310 
   8311 	/*
   8312 	 * Initialize the receive descriptor and receive job
   8313 	 * descriptor rings.
   8314 	 */
   8315 	if (sc->sc_type < WM_T_82543) {
   8316 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8317 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8318 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8319 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8320 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8321 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8322 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8323 
   8324 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8325 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8326 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8327 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8328 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8329 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8330 	} else {
   8331 		int qid = wmq->wmq_id;
   8332 
   8333 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8334 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8335 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8336 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8337 
   8338 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8339 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8340 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8341 				    "or higher\n", __func__, MCLBYTES);
   8342 
   8343 			/*
   8344 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   8345 			 * only.
   8346 			 */
   8347 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8348 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8349 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8350 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8351 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8352 			    | RXDCTL_WTHRESH(1));
   8353 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8354 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8355 		} else {
   8356 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8357 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8358 			/* XXX should update with AIM? */
   8359 			CSR_WRITE(sc, WMREG_RDTR,
   8360 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    8361 			/* MUST be the same */
   8362 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8363 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8364 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8365 		}
   8366 	}
   8367 }
   8368 
   8369 static int
   8370 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8371 {
   8372 	struct wm_rxsoft *rxs;
   8373 	int error, i;
   8374 
   8375 	KASSERT(mutex_owned(rxq->rxq_lock));
   8376 
   8377 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8378 		rxs = &rxq->rxq_soft[i];
   8379 		if (rxs->rxs_mbuf == NULL) {
   8380 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8381 				log(LOG_ERR, "%s: unable to allocate or map "
   8382 				    "rx buffer %d, error = %d\n",
   8383 				    device_xname(sc->sc_dev), i, error);
   8384 				/*
   8385 				 * XXX Should attempt to run with fewer receive
   8386 				 * XXX buffers instead of just failing.
   8387 				 */
   8388 				wm_rxdrain(rxq);
   8389 				return ENOMEM;
   8390 			}
   8391 		} else {
   8392 			/*
   8393 			 * For 82575 and 82576, the RX descriptors must be
   8394 			 * initialized after the setting of RCTL.EN in
   8395 			 * wm_set_filter()
   8396 			 */
   8397 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8398 				wm_init_rxdesc(rxq, i);
   8399 		}
   8400 	}
   8401 	rxq->rxq_ptr = 0;
   8402 	rxq->rxq_discard = 0;
   8403 	WM_RXCHAIN_RESET(rxq);
   8404 
   8405 	return 0;
   8406 }
   8407 
   8408 static int
   8409 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8410     struct wm_rxqueue *rxq)
   8411 {
   8412 
   8413 	KASSERT(mutex_owned(rxq->rxq_lock));
   8414 
   8415 	/*
   8416 	 * Set up some register offsets that are different between
   8417 	 * the i82542 and the i82543 and later chips.
   8418 	 */
   8419 	if (sc->sc_type < WM_T_82543)
   8420 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8421 	else
   8422 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8423 
   8424 	wm_init_rx_regs(sc, wmq, rxq);
   8425 	return wm_init_rx_buffer(sc, rxq);
   8426 }
   8427 
   8428 /*
    8429  * wm_init_txrx_queues:
    8430  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   8431  */
   8432 static int
   8433 wm_init_txrx_queues(struct wm_softc *sc)
   8434 {
   8435 	int i, error = 0;
   8436 
   8437 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8438 		device_xname(sc->sc_dev), __func__));
   8439 
   8440 	for (i = 0; i < sc->sc_nqueues; i++) {
   8441 		struct wm_queue *wmq = &sc->sc_queue[i];
   8442 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8443 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8444 
   8445 		/*
   8446 		 * TODO
    8447 		 * Currently, use a constant value instead of AIM.
    8448 		 * Furthermore, the interrupt interval of a multiqueue device
    8449 		 * using polling mode is lower than the default value.
    8450 		 * More tuning, and AIM, are required.
   8451 		 */
   8452 		if (wm_is_using_multiqueue(sc))
   8453 			wmq->wmq_itr = 50;
   8454 		else
   8455 			wmq->wmq_itr = sc->sc_itr_init;
   8456 		wmq->wmq_set_itr = true;
   8457 
   8458 		mutex_enter(txq->txq_lock);
   8459 		wm_init_tx_queue(sc, wmq, txq);
   8460 		mutex_exit(txq->txq_lock);
   8461 
   8462 		mutex_enter(rxq->rxq_lock);
   8463 		error = wm_init_rx_queue(sc, wmq, rxq);
   8464 		mutex_exit(rxq->rxq_lock);
   8465 		if (error)
   8466 			break;
   8467 	}
   8468 
   8469 	return error;
   8470 }
   8471 
   8472 /*
   8473  * wm_tx_offload:
   8474  *
   8475  *	Set up TCP/IP checksumming parameters for the
   8476  *	specified packet.
   8477  */
   8478 static void
   8479 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8480     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8481 {
   8482 	struct mbuf *m0 = txs->txs_mbuf;
   8483 	struct livengood_tcpip_ctxdesc *t;
   8484 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8485 	uint32_t ipcse;
   8486 	struct ether_header *eh;
   8487 	int offset, iphl;
   8488 	uint8_t fields;
   8489 
   8490 	/*
   8491 	 * XXX It would be nice if the mbuf pkthdr had offset
   8492 	 * fields for the protocol headers.
   8493 	 */
   8494 
   8495 	eh = mtod(m0, struct ether_header *);
    8496 	switch (ntohs(eh->ether_type)) {
   8497 	case ETHERTYPE_IP:
   8498 	case ETHERTYPE_IPV6:
   8499 		offset = ETHER_HDR_LEN;
   8500 		break;
   8501 
   8502 	case ETHERTYPE_VLAN:
   8503 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8504 		break;
   8505 
   8506 	default:
   8507 		/* Don't support this protocol or encapsulation. */
   8508 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8509 		txq->txq_last_hw_ipcs = 0;
   8510 		txq->txq_last_hw_tucs = 0;
   8511 		*fieldsp = 0;
   8512 		*cmdp = 0;
   8513 		return;
   8514 	}
   8515 
   8516 	if ((m0->m_pkthdr.csum_flags &
   8517 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8518 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8519 	} else
   8520 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8521 
   8522 	ipcse = offset + iphl - 1;
   8523 
   8524 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8525 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8526 	seg = 0;
   8527 	fields = 0;
   8528 
   8529 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8530 		int hlen = offset + iphl;
   8531 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8532 
   8533 		if (__predict_false(m0->m_len <
   8534 				    (hlen + sizeof(struct tcphdr)))) {
   8535 			/*
   8536 			 * TCP/IP headers are not in the first mbuf; we need
   8537 			 * to do this the slow and painful way. Let's just
   8538 			 * hope this doesn't happen very often.
   8539 			 */
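         			/*
         			 * For TSO the device rewrites the IP total
         			 * length and the TCP checksum per generated
         			 * segment, so the length fields are zeroed
         			 * here and th_sum is seeded with a
         			 * pseudo-header checksum that omits the
         			 * length.
         			 */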
   8540 			struct tcphdr th;
   8541 
   8542 			WM_Q_EVCNT_INCR(txq, tsopain);
   8543 
   8544 			m_copydata(m0, hlen, sizeof(th), &th);
   8545 			if (v4) {
   8546 				struct ip ip;
   8547 
   8548 				m_copydata(m0, offset, sizeof(ip), &ip);
   8549 				ip.ip_len = 0;
   8550 				m_copyback(m0,
   8551 				    offset + offsetof(struct ip, ip_len),
   8552 				    sizeof(ip.ip_len), &ip.ip_len);
   8553 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8554 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8555 			} else {
   8556 				struct ip6_hdr ip6;
   8557 
   8558 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8559 				ip6.ip6_plen = 0;
   8560 				m_copyback(m0,
   8561 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8562 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8563 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8564 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8565 			}
   8566 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8567 			    sizeof(th.th_sum), &th.th_sum);
   8568 
   8569 			hlen += th.th_off << 2;
   8570 		} else {
   8571 			/*
   8572 			 * TCP/IP headers are in the first mbuf; we can do
   8573 			 * this the easy way.
   8574 			 */
   8575 			struct tcphdr *th;
   8576 
   8577 			if (v4) {
   8578 				struct ip *ip =
   8579 				    (void *)(mtod(m0, char *) + offset);
   8580 				th = (void *)(mtod(m0, char *) + hlen);
   8581 
   8582 				ip->ip_len = 0;
   8583 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8584 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8585 			} else {
   8586 				struct ip6_hdr *ip6 =
   8587 				    (void *)(mtod(m0, char *) + offset);
   8588 				th = (void *)(mtod(m0, char *) + hlen);
   8589 
   8590 				ip6->ip6_plen = 0;
   8591 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8592 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8593 			}
   8594 			hlen += th->th_off << 2;
   8595 		}
   8596 
   8597 		if (v4) {
   8598 			WM_Q_EVCNT_INCR(txq, tso);
   8599 			cmdlen |= WTX_TCPIP_CMD_IP;
   8600 		} else {
   8601 			WM_Q_EVCNT_INCR(txq, tso6);
   8602 			ipcse = 0;
   8603 		}
   8604 		cmd |= WTX_TCPIP_CMD_TSE;
   8605 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8606 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8607 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8608 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8609 	}
   8610 
   8611 	/*
   8612 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8613 	 * offload feature, if we load the context descriptor, we
   8614 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8615 	 */
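         	/*
         	 * These are byte offsets into the frame: for a plain IPv4
         	 * frame, IPCSS = 14 (start of the IP header), IPCSO = 14 +
         	 * offsetof(struct ip, ip_sum) = 24, and IPCSE = 14 + 20 - 1 =
         	 * 33 for a standard header without options.
         	 */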
   8616 
   8617 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8618 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8619 	    WTX_TCPIP_IPCSE(ipcse);
   8620 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8621 		WM_Q_EVCNT_INCR(txq, ipsum);
   8622 		fields |= WTX_IXSM;
   8623 	}
   8624 
   8625 	offset += iphl;
   8626 
   8627 	if (m0->m_pkthdr.csum_flags &
   8628 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8629 		WM_Q_EVCNT_INCR(txq, tusum);
   8630 		fields |= WTX_TXSM;
   8631 		tucs = WTX_TCPIP_TUCSS(offset) |
   8632 		    WTX_TCPIP_TUCSO(offset +
   8633 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8634 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8635 	} else if ((m0->m_pkthdr.csum_flags &
   8636 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8637 		WM_Q_EVCNT_INCR(txq, tusum6);
   8638 		fields |= WTX_TXSM;
   8639 		tucs = WTX_TCPIP_TUCSS(offset) |
   8640 		    WTX_TCPIP_TUCSO(offset +
   8641 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8642 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8643 	} else {
   8644 		/* Just initialize it to a valid TCP context. */
   8645 		tucs = WTX_TCPIP_TUCSS(offset) |
   8646 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8647 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8648 	}
   8649 
   8650 	*cmdp = cmd;
   8651 	*fieldsp = fields;
   8652 
   8653 	/*
    8654 	 * We don't have to write a context descriptor for every packet,
    8655 	 * except on the 82574.  For the 82574, we must write a context
    8656 	 * descriptor for every packet when we use two descriptor queues.
    8657 	 *
    8658 	 * The 82574L can only remember the *last* context used,
    8659 	 * regardless of the queue it was used for.  We cannot reuse
   8660 	 * contexts on this hardware platform and must generate a new
   8661 	 * context every time.  82574L hardware spec, section 7.2.6,
   8662 	 * second note.
   8663 	 */
   8664 	if (sc->sc_nqueues < 2) {
   8665 		/*
    8666 		 * Setting up a new checksum offload context for every
    8667 		 * frame takes a lot of processing time for the hardware.
    8668 		 * This also reduces performance a lot for small sized
    8669 		 * frames, so avoid it if the driver can use a previously
    8670 		 * configured checksum offload context.
    8671 		 * For TSO, in theory we could reuse the same TSO context
    8672 		 * if the frame is the same type (IP/TCP) and has the same
    8673 		 * MSS.  However, checking whether a frame has the same
    8674 		 * IP/TCP structure is hard, so just ignore that and always
    8675 		 * establish a new TSO context.
   8676 		 */
   8677 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8678 		    == 0) {
   8679 			if (txq->txq_last_hw_cmd == cmd &&
   8680 			    txq->txq_last_hw_fields == fields &&
   8681 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8682 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8683 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8684 				return;
   8685 			}
   8686 		}
   8687 
   8688 		txq->txq_last_hw_cmd = cmd;
   8689 		txq->txq_last_hw_fields = fields;
   8690 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8691 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8692 	}
   8693 
   8694 	/* Fill in the context descriptor. */
   8695 	t = (struct livengood_tcpip_ctxdesc *)
   8696 	    &txq->txq_descs[txq->txq_next];
   8697 	t->tcpip_ipcs = htole32(ipcs);
   8698 	t->tcpip_tucs = htole32(tucs);
   8699 	t->tcpip_cmdlen = htole32(cmdlen);
   8700 	t->tcpip_seg = htole32(seg);
   8701 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8702 
   8703 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8704 	txs->txs_ndesc++;
   8705 }
   8706 
   8707 static inline int
   8708 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8709 {
   8710 	struct wm_softc *sc = ifp->if_softc;
   8711 	u_int cpuid = cpu_index(curcpu());
   8712 
   8713 	/*
    8714 	 * Currently, a simple distribution strategy.
    8715 	 * TODO:
    8716 	 * Distribute by flowid (RSS hash value).
   8717 	 */
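         	/*
         	 * For example, with ncpu = 8, sc_nqueues = 4 and
         	 * sc_affinity_offset = 2, CPUs 2..5 map to queues 0..3 and
         	 * CPUs 6, 7, 0, 1 wrap around to queues 0..3 again.
         	 */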
   8718 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8719 }
   8720 
   8721 static inline bool
   8722 wm_linkdown_discard(struct wm_txqueue *txq)
   8723 {
   8724 
   8725 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8726 		return true;
   8727 
   8728 	return false;
   8729 }
   8730 
   8731 /*
   8732  * wm_start:		[ifnet interface function]
   8733  *
   8734  *	Start packet transmission on the interface.
   8735  */
   8736 static void
   8737 wm_start(struct ifnet *ifp)
   8738 {
   8739 	struct wm_softc *sc = ifp->if_softc;
   8740 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8741 
   8742 	KASSERT(if_is_mpsafe(ifp));
   8743 	/*
   8744 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8745 	 */
   8746 
   8747 	mutex_enter(txq->txq_lock);
   8748 	if (!txq->txq_stopping)
   8749 		wm_start_locked(ifp);
   8750 	mutex_exit(txq->txq_lock);
   8751 }
   8752 
   8753 static void
   8754 wm_start_locked(struct ifnet *ifp)
   8755 {
   8756 	struct wm_softc *sc = ifp->if_softc;
   8757 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8758 
   8759 	wm_send_common_locked(ifp, txq, false);
   8760 }
   8761 
   8762 static int
   8763 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8764 {
   8765 	int qid;
   8766 	struct wm_softc *sc = ifp->if_softc;
   8767 	struct wm_txqueue *txq;
   8768 
   8769 	qid = wm_select_txqueue(ifp, m);
   8770 	txq = &sc->sc_queue[qid].wmq_txq;
   8771 
   8772 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8773 		m_freem(m);
   8774 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8775 		return ENOBUFS;
   8776 	}
   8777 
   8778 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8779 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8780 	if (m->m_flags & M_MCAST)
   8781 		if_statinc_ref(nsr, if_omcasts);
   8782 	IF_STAT_PUTREF(ifp);
   8783 
   8784 	if (mutex_tryenter(txq->txq_lock)) {
   8785 		if (!txq->txq_stopping)
   8786 			wm_transmit_locked(ifp, txq);
   8787 		mutex_exit(txq->txq_lock);
   8788 	}
   8789 
   8790 	return 0;
   8791 }
   8792 
   8793 static void
   8794 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8795 {
   8796 
   8797 	wm_send_common_locked(ifp, txq, true);
   8798 }
   8799 
   8800 static void
   8801 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8802     bool is_transmit)
   8803 {
   8804 	struct wm_softc *sc = ifp->if_softc;
   8805 	struct mbuf *m0;
   8806 	struct wm_txsoft *txs;
   8807 	bus_dmamap_t dmamap;
   8808 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8809 	bus_addr_t curaddr;
   8810 	bus_size_t seglen, curlen;
   8811 	uint32_t cksumcmd;
   8812 	uint8_t cksumfields;
   8813 	bool remap = true;
   8814 
   8815 	KASSERT(mutex_owned(txq->txq_lock));
   8816 	KASSERT(!txq->txq_stopping);
   8817 
   8818 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8819 		return;
   8820 
   8821 	if (__predict_false(wm_linkdown_discard(txq))) {
   8822 		do {
   8823 			if (is_transmit)
   8824 				m0 = pcq_get(txq->txq_interq);
   8825 			else
   8826 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8827 			/*
    8828 			 * Increment the sent-packet counter even though the
    8829 			 * packet is discarded because the PHY link is down.
   8830 			 */
   8831 			if (m0 != NULL) {
   8832 				if_statinc(ifp, if_opackets);
   8833 				m_freem(m0);
   8834 			}
   8835 		} while (m0 != NULL);
   8836 		return;
   8837 	}
   8838 
   8839 	/* Remember the previous number of free descriptors. */
   8840 	ofree = txq->txq_free;
   8841 
   8842 	/*
   8843 	 * Loop through the send queue, setting up transmit descriptors
   8844 	 * until we drain the queue, or use up all available transmit
   8845 	 * descriptors.
   8846 	 */
   8847 	for (;;) {
   8848 		m0 = NULL;
   8849 
   8850 		/* Get a work queue entry. */
   8851 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8852 			wm_txeof(txq, UINT_MAX);
   8853 			if (txq->txq_sfree == 0) {
   8854 				DPRINTF(sc, WM_DEBUG_TX,
   8855 				    ("%s: TX: no free job descriptors\n",
   8856 					device_xname(sc->sc_dev)));
   8857 				WM_Q_EVCNT_INCR(txq, txsstall);
   8858 				break;
   8859 			}
   8860 		}
   8861 
   8862 		/* Grab a packet off the queue. */
   8863 		if (is_transmit)
   8864 			m0 = pcq_get(txq->txq_interq);
   8865 		else
   8866 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8867 		if (m0 == NULL)
   8868 			break;
   8869 
   8870 		DPRINTF(sc, WM_DEBUG_TX,
   8871 		    ("%s: TX: have packet to transmit: %p\n",
   8872 			device_xname(sc->sc_dev), m0));
   8873 
   8874 		txs = &txq->txq_soft[txq->txq_snext];
   8875 		dmamap = txs->txs_dmamap;
   8876 
   8877 		use_tso = (m0->m_pkthdr.csum_flags &
   8878 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8879 
   8880 		/*
   8881 		 * So says the Linux driver:
   8882 		 * The controller does a simple calculation to make sure
   8883 		 * there is enough room in the FIFO before initiating the
   8884 		 * DMA for each buffer. The calc is:
   8885 		 *	4 = ceil(buffer len / MSS)
   8886 		 * To make sure we don't overrun the FIFO, adjust the max
   8887 		 * buffer len if the MSS drops.
   8888 		 */
   8889 		dmamap->dm_maxsegsz =
   8890 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8891 		    ? m0->m_pkthdr.segsz << 2
   8892 		    : WTX_MAX_LEN;
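         		/*
         		 * For example, an MSS of 1448 caps dm_maxsegsz at
         		 * 1448 << 2 = 5792 bytes, so each DMA buffer stays
         		 * within 4 * MSS, matching the FIFO calculation
         		 * quoted above.
         		 */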
   8893 
   8894 		/*
   8895 		 * Load the DMA map.  If this fails, the packet either
   8896 		 * didn't fit in the allotted number of segments, or we
   8897 		 * were short on resources.  For the too-many-segments
   8898 		 * case, we simply report an error and drop the packet,
   8899 		 * since we can't sanely copy a jumbo packet to a single
   8900 		 * buffer.
   8901 		 */
   8902 retry:
   8903 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8904 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8905 		if (__predict_false(error)) {
   8906 			if (error == EFBIG) {
   8907 				if (remap == true) {
   8908 					struct mbuf *m;
   8909 
   8910 					remap = false;
   8911 					m = m_defrag(m0, M_NOWAIT);
   8912 					if (m != NULL) {
   8913 						WM_Q_EVCNT_INCR(txq, defrag);
   8914 						m0 = m;
   8915 						goto retry;
   8916 					}
   8917 				}
   8918 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8919 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8920 				    "DMA segments, dropping...\n",
   8921 				    device_xname(sc->sc_dev));
   8922 				wm_dump_mbuf_chain(sc, m0);
   8923 				m_freem(m0);
   8924 				continue;
   8925 			}
   8926 			/* Short on resources, just stop for now. */
   8927 			DPRINTF(sc, WM_DEBUG_TX,
   8928 			    ("%s: TX: dmamap load failed: %d\n",
   8929 				device_xname(sc->sc_dev), error));
   8930 			break;
   8931 		}
   8932 
   8933 		segs_needed = dmamap->dm_nsegs;
   8934 		if (use_tso) {
   8935 			/* For sentinel descriptor; see below. */
   8936 			segs_needed++;
   8937 		}
   8938 
   8939 		/*
   8940 		 * Ensure we have enough descriptors free to describe
   8941 		 * the packet. Note, we always reserve one descriptor
   8942 		 * at the end of the ring due to the semantics of the
   8943 		 * TDT register, plus one more in the event we need
   8944 		 * to load offload context.
   8945 		 */
   8946 		if (segs_needed > txq->txq_free - 2) {
   8947 			/*
   8948 			 * Not enough free descriptors to transmit this
   8949 			 * packet.  We haven't committed anything yet,
   8950 			 * so just unload the DMA map, put the packet
    8951 			 * back on the queue, and punt.  Notify the upper
   8952 			 * layer that there are no more slots left.
   8953 			 */
   8954 			DPRINTF(sc, WM_DEBUG_TX,
   8955 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8956 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8957 				segs_needed, txq->txq_free - 1));
   8958 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8959 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8960 			WM_Q_EVCNT_INCR(txq, txdstall);
   8961 			break;
   8962 		}
   8963 
   8964 		/*
   8965 		 * Check for 82547 Tx FIFO bug. We need to do this
   8966 		 * once we know we can transmit the packet, since we
   8967 		 * do some internal FIFO space accounting here.
   8968 		 */
   8969 		if (sc->sc_type == WM_T_82547 &&
   8970 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8971 			DPRINTF(sc, WM_DEBUG_TX,
   8972 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8973 				device_xname(sc->sc_dev)));
   8974 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8975 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8976 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8977 			break;
   8978 		}
   8979 
   8980 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8981 
   8982 		DPRINTF(sc, WM_DEBUG_TX,
   8983 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8984 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8985 
   8986 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8987 
   8988 		/*
   8989 		 * Store a pointer to the packet so that we can free it
   8990 		 * later.
   8991 		 *
   8992 		 * Initially, we consider the number of descriptors the
    8993 		 * packet uses to be the number of DMA segments.  This may
    8994 		 * be incremented by 1 if we do checksum offload (a descriptor
   8995 		 * is used to set the checksum context).
   8996 		 */
   8997 		txs->txs_mbuf = m0;
   8998 		txs->txs_firstdesc = txq->txq_next;
   8999 		txs->txs_ndesc = segs_needed;
   9000 
   9001 		/* Set up offload parameters for this packet. */
   9002 		if (m0->m_pkthdr.csum_flags &
   9003 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9004 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9005 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9006 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9007 		} else {
   9008 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9009 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9010 			cksumcmd = 0;
   9011 			cksumfields = 0;
   9012 		}
   9013 
   9014 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9015 
   9016 		/* Sync the DMA map. */
   9017 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9018 		    BUS_DMASYNC_PREWRITE);
   9019 
   9020 		/* Initialize the transmit descriptor. */
   9021 		for (nexttx = txq->txq_next, seg = 0;
   9022 		     seg < dmamap->dm_nsegs; seg++) {
   9023 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9024 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9025 			     seglen != 0;
   9026 			     curaddr += curlen, seglen -= curlen,
   9027 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9028 				curlen = seglen;
   9029 
   9030 				/*
   9031 				 * So says the Linux driver:
   9032 				 * Work around for premature descriptor
   9033 				 * write-backs in TSO mode.  Append a
   9034 				 * 4-byte sentinel descriptor.
   9035 				 */
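         				/*
         				 * Trimming 4 bytes from the final
         				 * segment leaves them for one more pass
         				 * of this loop, producing the sentinel
         				 * descriptor that segs_needed reserved
         				 * above.
         				 */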
   9036 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9037 				    curlen > 8)
   9038 					curlen -= 4;
   9039 
   9040 				wm_set_dma_addr(
   9041 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9042 				txq->txq_descs[nexttx].wtx_cmdlen
   9043 				    = htole32(cksumcmd | curlen);
   9044 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9045 				    = 0;
   9046 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9047 				    = cksumfields;
    9048 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9049 				lasttx = nexttx;
   9050 
   9051 				DPRINTF(sc, WM_DEBUG_TX,
   9052 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9053 					"len %#04zx\n",
   9054 					device_xname(sc->sc_dev), nexttx,
   9055 					(uint64_t)curaddr, curlen));
   9056 			}
   9057 		}
   9058 
   9059 		KASSERT(lasttx != -1);
   9060 
   9061 		/*
   9062 		 * Set up the command byte on the last descriptor of
   9063 		 * the packet. If we're in the interrupt delay window,
   9064 		 * delay the interrupt.
   9065 		 */
   9066 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9067 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9068 
   9069 		/*
   9070 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9071 		 * up the descriptor to encapsulate the packet for us.
   9072 		 *
   9073 		 * This is only valid on the last descriptor of the packet.
   9074 		 */
   9075 		if (vlan_has_tag(m0)) {
   9076 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9077 			    htole32(WTX_CMD_VLE);
   9078 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9079 			    = htole16(vlan_get_tag(m0));
   9080 		}
   9081 
   9082 		txs->txs_lastdesc = lasttx;
   9083 
   9084 		DPRINTF(sc, WM_DEBUG_TX,
   9085 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9086 			device_xname(sc->sc_dev),
   9087 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9088 
   9089 		/* Sync the descriptors we're using. */
   9090 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9091 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9092 
   9093 		/* Give the packet to the chip. */
   9094 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9095 
   9096 		DPRINTF(sc, WM_DEBUG_TX,
   9097 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9098 
   9099 		DPRINTF(sc, WM_DEBUG_TX,
   9100 		    ("%s: TX: finished transmitting packet, job %d\n",
   9101 			device_xname(sc->sc_dev), txq->txq_snext));
   9102 
   9103 		/* Advance the tx pointer. */
   9104 		txq->txq_free -= txs->txs_ndesc;
   9105 		txq->txq_next = nexttx;
   9106 
   9107 		txq->txq_sfree--;
   9108 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9109 
   9110 		/* Pass the packet to any BPF listeners. */
   9111 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9112 	}
   9113 
   9114 	if (m0 != NULL) {
   9115 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9116 		WM_Q_EVCNT_INCR(txq, descdrop);
   9117 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9118 			__func__));
   9119 		m_freem(m0);
   9120 	}
   9121 
   9122 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9123 		/* No more slots; notify upper layer. */
   9124 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9125 	}
   9126 
   9127 	if (txq->txq_free != ofree) {
   9128 		/* Set a watchdog timer in case the chip flakes out. */
   9129 		txq->txq_lastsent = time_uptime;
   9130 		txq->txq_sending = true;
   9131 	}
   9132 }
   9133 
   9134 /*
   9135  * wm_nq_tx_offload:
   9136  *
   9137  *	Set up TCP/IP checksumming parameters for the
   9138  *	specified packet, for NEWQUEUE devices
   9139  */
   9140 static void
   9141 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9142     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9143 {
   9144 	struct mbuf *m0 = txs->txs_mbuf;
   9145 	uint32_t vl_len, mssidx, cmdc;
   9146 	struct ether_header *eh;
   9147 	int offset, iphl;
   9148 
   9149 	/*
   9150 	 * XXX It would be nice if the mbuf pkthdr had offset
   9151 	 * fields for the protocol headers.
   9152 	 */
   9153 	*cmdlenp = 0;
   9154 	*fieldsp = 0;
   9155 
   9156 	eh = mtod(m0, struct ether_header *);
   9157 	switch (htons(eh->ether_type)) {
   9158 	case ETHERTYPE_IP:
   9159 	case ETHERTYPE_IPV6:
   9160 		offset = ETHER_HDR_LEN;
   9161 		break;
   9162 
   9163 	case ETHERTYPE_VLAN:
   9164 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9165 		break;
   9166 
   9167 	default:
   9168 		/* Don't support this protocol or encapsulation. */
   9169 		*do_csum = false;
   9170 		return;
   9171 	}
   9172 	*do_csum = true;
   9173 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9174 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   9175 
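         	/*
         	 * Pack the header lengths for the advanced Tx context
         	 * descriptor: the NQTXC_VLLEN_* shifts place the MAC header
         	 * length, the IP header length and (below) the VLAN tag into
         	 * the single 32-bit vl_len word. For example, an untagged
         	 * IPv4 frame gives (14 << MACLEN_SHIFT) | (20 << IPLEN_SHIFT).
         	 */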
   9176 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9177 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9178 
   9179 	if ((m0->m_pkthdr.csum_flags &
   9180 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9181 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9182 	} else {
   9183 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9184 	}
   9185 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9186 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   9187 
   9188 	if (vlan_has_tag(m0)) {
   9189 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9190 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9191 		*cmdlenp |= NQTX_CMD_VLE;
   9192 	}
   9193 
   9194 	mssidx = 0;
   9195 
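         	/*
         	 * For TSO, the full L2+L3+L4 header length (hlen) is computed
         	 * below so that the PAYLEN field covers only the TCP payload,
         	 * and the MSS and TCP header length are packed into mssidx
         	 * for the context descriptor.
         	 */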
   9196 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9197 		int hlen = offset + iphl;
   9198 		int tcp_hlen;
   9199 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9200 
   9201 		if (__predict_false(m0->m_len <
   9202 				    (hlen + sizeof(struct tcphdr)))) {
   9203 			/*
   9204 			 * TCP/IP headers are not in the first mbuf; we need
   9205 			 * to do this the slow and painful way. Let's just
   9206 			 * hope this doesn't happen very often.
   9207 			 */
   9208 			struct tcphdr th;
   9209 
   9210 			WM_Q_EVCNT_INCR(txq, tsopain);
   9211 
   9212 			m_copydata(m0, hlen, sizeof(th), &th);
   9213 			if (v4) {
   9214 				struct ip ip;
   9215 
   9216 				m_copydata(m0, offset, sizeof(ip), &ip);
   9217 				ip.ip_len = 0;
   9218 				m_copyback(m0,
   9219 				    offset + offsetof(struct ip, ip_len),
   9220 				    sizeof(ip.ip_len), &ip.ip_len);
   9221 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9222 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9223 			} else {
   9224 				struct ip6_hdr ip6;
   9225 
   9226 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9227 				ip6.ip6_plen = 0;
   9228 				m_copyback(m0,
   9229 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9230 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9231 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9232 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9233 			}
   9234 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9235 			    sizeof(th.th_sum), &th.th_sum);
   9236 
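         			/* th_off is in 32-bit words (5 = 20-byte header). */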
   9237 			tcp_hlen = th.th_off << 2;
   9238 		} else {
   9239 			/*
   9240 			 * TCP/IP headers are in the first mbuf; we can do
   9241 			 * this the easy way.
   9242 			 */
   9243 			struct tcphdr *th;
   9244 
   9245 			if (v4) {
   9246 				struct ip *ip =
   9247 				    (void *)(mtod(m0, char *) + offset);
   9248 				th = (void *)(mtod(m0, char *) + hlen);
   9249 
   9250 				ip->ip_len = 0;
   9251 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9252 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9253 			} else {
   9254 				struct ip6_hdr *ip6 =
   9255 				    (void *)(mtod(m0, char *) + offset);
   9256 				th = (void *)(mtod(m0, char *) + hlen);
   9257 
   9258 				ip6->ip6_plen = 0;
   9259 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9260 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9261 			}
   9262 			tcp_hlen = th->th_off << 2;
   9263 		}
   9264 		hlen += tcp_hlen;
   9265 		*cmdlenp |= NQTX_CMD_TSE;
   9266 
   9267 		if (v4) {
   9268 			WM_Q_EVCNT_INCR(txq, tso);
   9269 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9270 		} else {
   9271 			WM_Q_EVCNT_INCR(txq, tso6);
   9272 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9273 		}
   9274 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9275 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9276 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9277 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9278 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9279 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9280 	} else {
   9281 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9282 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9283 	}
   9284 
   9285 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9286 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9287 		cmdc |= NQTXC_CMD_IP4;
   9288 	}
   9289 
   9290 	if (m0->m_pkthdr.csum_flags &
   9291 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9292 		WM_Q_EVCNT_INCR(txq, tusum);
   9293 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9294 			cmdc |= NQTXC_CMD_TCP;
   9295 		else
   9296 			cmdc |= NQTXC_CMD_UDP;
   9297 
   9298 		cmdc |= NQTXC_CMD_IP4;
   9299 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9300 	}
   9301 	if (m0->m_pkthdr.csum_flags &
   9302 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9303 		WM_Q_EVCNT_INCR(txq, tusum6);
   9304 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9305 			cmdc |= NQTXC_CMD_TCP;
   9306 		else
   9307 			cmdc |= NQTXC_CMD_UDP;
   9308 
   9309 		cmdc |= NQTXC_CMD_IP6;
   9310 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9311 	}
   9312 
    9313 	/*
    9314 	 * We don't have to write a context descriptor for every packet
    9315 	 * on NEWQUEUE controllers, that is, the 82575, 82576, 82580,
    9316 	 * I350, I354, I210 and I211. Writing it once per Tx queue is
    9317 	 * enough on these controllers.
    9318 	 * Writing a context descriptor for every packet adds overhead,
    9319 	 * but it does not cause problems.
    9320 	 */
   9321 	/* Fill in the context descriptor. */
   9322 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9323 	    htole32(vl_len);
   9324 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9325 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9326 	    htole32(cmdc);
   9327 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9328 	    htole32(mssidx);
   9329 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9330 	DPRINTF(sc, WM_DEBUG_TX,
   9331 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9332 		txq->txq_next, 0, vl_len));
   9333 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9334 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9335 	txs->txs_ndesc++;
   9336 }
   9337 
   9338 /*
   9339  * wm_nq_start:		[ifnet interface function]
   9340  *
   9341  *	Start packet transmission on the interface for NEWQUEUE devices
   9342  */
   9343 static void
   9344 wm_nq_start(struct ifnet *ifp)
   9345 {
   9346 	struct wm_softc *sc = ifp->if_softc;
   9347 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9348 
   9349 	KASSERT(if_is_mpsafe(ifp));
   9350 	/*
   9351 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9352 	 */
   9353 
   9354 	mutex_enter(txq->txq_lock);
   9355 	if (!txq->txq_stopping)
   9356 		wm_nq_start_locked(ifp);
   9357 	mutex_exit(txq->txq_lock);
   9358 }
   9359 
   9360 static void
   9361 wm_nq_start_locked(struct ifnet *ifp)
   9362 {
   9363 	struct wm_softc *sc = ifp->if_softc;
   9364 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9365 
   9366 	wm_nq_send_common_locked(ifp, txq, false);
   9367 }
   9368 
   9369 static int
   9370 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9371 {
   9372 	int qid;
   9373 	struct wm_softc *sc = ifp->if_softc;
   9374 	struct wm_txqueue *txq;
   9375 
   9376 	qid = wm_select_txqueue(ifp, m);
   9377 	txq = &sc->sc_queue[qid].wmq_txq;
   9378 
   9379 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9380 		m_freem(m);
   9381 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9382 		return ENOBUFS;
   9383 	}
   9384 
   9385 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9386 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9387 	if (m->m_flags & M_MCAST)
   9388 		if_statinc_ref(nsr, if_omcasts);
   9389 	IF_STAT_PUTREF(ifp);
   9390 
    9391 	/*
    9392 	 * There are two situations in which this mutex_tryenter() can
    9393 	 * fail at run time:
    9394 	 *     (1) contention with the interrupt handler
    9395 	 *         (wm_txrxintr_msix())
    9396 	 *     (2) contention with the deferred if_start softint
    9397 	 *         (wm_handle_queue())
    9398 	 * In both cases the last packet enqueued to txq->txq_interq is
    9399 	 * eventually dequeued by wm_deferred_start_locked(), so the
    9400 	 * packet does not get stuck in either case.
    9401 	 */
   9402 	if (mutex_tryenter(txq->txq_lock)) {
   9403 		if (!txq->txq_stopping)
   9404 			wm_nq_transmit_locked(ifp, txq);
   9405 		mutex_exit(txq->txq_lock);
   9406 	}
   9407 
   9408 	return 0;
   9409 }
   9410 
   9411 static void
   9412 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9413 {
   9414 
   9415 	wm_nq_send_common_locked(ifp, txq, true);
   9416 }
   9417 
   9418 static void
   9419 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9420     bool is_transmit)
   9421 {
   9422 	struct wm_softc *sc = ifp->if_softc;
   9423 	struct mbuf *m0;
   9424 	struct wm_txsoft *txs;
   9425 	bus_dmamap_t dmamap;
   9426 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9427 	bool do_csum, sent;
   9428 	bool remap = true;
   9429 
   9430 	KASSERT(mutex_owned(txq->txq_lock));
   9431 	KASSERT(!txq->txq_stopping);
   9432 
   9433 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9434 		return;
   9435 
   9436 	if (__predict_false(wm_linkdown_discard(txq))) {
   9437 		do {
   9438 			if (is_transmit)
   9439 				m0 = pcq_get(txq->txq_interq);
   9440 			else
   9441 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9442 			/*
    9443 			 * Increment the success counter even though the
    9444 			 * packet is discarded because the PHY link is down.
    9445 			 */
   9446 			if (m0 != NULL) {
   9447 				if_statinc(ifp, if_opackets);
   9448 				m_freem(m0);
   9449 			}
   9450 		} while (m0 != NULL);
   9451 		return;
   9452 	}
   9453 
   9454 	sent = false;
   9455 
   9456 	/*
   9457 	 * Loop through the send queue, setting up transmit descriptors
   9458 	 * until we drain the queue, or use up all available transmit
   9459 	 * descriptors.
   9460 	 */
   9461 	for (;;) {
   9462 		m0 = NULL;
   9463 
   9464 		/* Get a work queue entry. */
   9465 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9466 			wm_txeof(txq, UINT_MAX);
   9467 			if (txq->txq_sfree == 0) {
   9468 				DPRINTF(sc, WM_DEBUG_TX,
   9469 				    ("%s: TX: no free job descriptors\n",
   9470 					device_xname(sc->sc_dev)));
   9471 				WM_Q_EVCNT_INCR(txq, txsstall);
   9472 				break;
   9473 			}
   9474 		}
   9475 
   9476 		/* Grab a packet off the queue. */
   9477 		if (is_transmit)
   9478 			m0 = pcq_get(txq->txq_interq);
   9479 		else
   9480 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9481 		if (m0 == NULL)
   9482 			break;
   9483 
   9484 		DPRINTF(sc, WM_DEBUG_TX,
   9485 		    ("%s: TX: have packet to transmit: %p\n",
   9486 			device_xname(sc->sc_dev), m0));
   9487 
   9488 		txs = &txq->txq_soft[txq->txq_snext];
   9489 		dmamap = txs->txs_dmamap;
   9490 
   9491 		/*
   9492 		 * Load the DMA map.  If this fails, the packet either
   9493 		 * didn't fit in the allotted number of segments, or we
   9494 		 * were short on resources.  For the too-many-segments
   9495 		 * case, we simply report an error and drop the packet,
   9496 		 * since we can't sanely copy a jumbo packet to a single
   9497 		 * buffer.
   9498 		 */
   9499 retry:
   9500 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9501 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9502 		if (__predict_false(error)) {
   9503 			if (error == EFBIG) {
   9504 				if (remap == true) {
   9505 					struct mbuf *m;
   9506 
   9507 					remap = false;
   9508 					m = m_defrag(m0, M_NOWAIT);
   9509 					if (m != NULL) {
   9510 						WM_Q_EVCNT_INCR(txq, defrag);
   9511 						m0 = m;
   9512 						goto retry;
   9513 					}
   9514 				}
   9515 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9516 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9517 				    "DMA segments, dropping...\n",
   9518 				    device_xname(sc->sc_dev));
   9519 				wm_dump_mbuf_chain(sc, m0);
   9520 				m_freem(m0);
   9521 				continue;
   9522 			}
   9523 			/* Short on resources, just stop for now. */
   9524 			DPRINTF(sc, WM_DEBUG_TX,
   9525 			    ("%s: TX: dmamap load failed: %d\n",
   9526 				device_xname(sc->sc_dev), error));
   9527 			break;
   9528 		}
   9529 
   9530 		segs_needed = dmamap->dm_nsegs;
   9531 
   9532 		/*
   9533 		 * Ensure we have enough descriptors free to describe
    9534 		 * the packet. Note that we always reserve one descriptor
   9535 		 * at the end of the ring due to the semantics of the
   9536 		 * TDT register, plus one more in the event we need
   9537 		 * to load offload context.
   9538 		 */
   9539 		if (segs_needed > txq->txq_free - 2) {
   9540 			/*
   9541 			 * Not enough free descriptors to transmit this
   9542 			 * packet.  We haven't committed anything yet,
   9543 			 * so just unload the DMA map, put the packet
    9544 			 * back on the queue, and punt. Notify the upper
   9545 			 * layer that there are no more slots left.
   9546 			 */
   9547 			DPRINTF(sc, WM_DEBUG_TX,
   9548 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9549 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9550 				segs_needed, txq->txq_free - 1));
   9551 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9552 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9553 			WM_Q_EVCNT_INCR(txq, txdstall);
   9554 			break;
   9555 		}
   9556 
   9557 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9558 
   9559 		DPRINTF(sc, WM_DEBUG_TX,
   9560 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9561 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9562 
   9563 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9564 
   9565 		/*
   9566 		 * Store a pointer to the packet so that we can free it
   9567 		 * later.
   9568 		 *
    9569 		 * Initially, we consider the number of descriptors the
    9570 		 * packet uses to be the number of DMA segments.  This may be
   9571 		 * incremented by 1 if we do checksum offload (a descriptor
   9572 		 * is used to set the checksum context).
   9573 		 */
   9574 		txs->txs_mbuf = m0;
   9575 		txs->txs_firstdesc = txq->txq_next;
   9576 		txs->txs_ndesc = segs_needed;
   9577 
   9578 		/* Set up offload parameters for this packet. */
   9579 		uint32_t cmdlen, fields, dcmdlen;
   9580 		if (m0->m_pkthdr.csum_flags &
   9581 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9582 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9583 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9584 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9585 			    &do_csum);
   9586 		} else {
   9587 			do_csum = false;
   9588 			cmdlen = 0;
   9589 			fields = 0;
   9590 		}
   9591 
   9592 		/* Sync the DMA map. */
   9593 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9594 		    BUS_DMASYNC_PREWRITE);
   9595 
   9596 		/* Initialize the first transmit descriptor. */
   9597 		nexttx = txq->txq_next;
   9598 		if (!do_csum) {
   9599 			/* Set up a legacy descriptor */
   9600 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9601 			    dmamap->dm_segs[0].ds_addr);
   9602 			txq->txq_descs[nexttx].wtx_cmdlen =
   9603 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9604 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9605 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9606 			if (vlan_has_tag(m0)) {
   9607 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9608 				    htole32(WTX_CMD_VLE);
   9609 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9610 				    htole16(vlan_get_tag(m0));
   9611 			} else
    9612 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9613 
   9614 			dcmdlen = 0;
   9615 		} else {
   9616 			/* Set up an advanced data descriptor */
   9617 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9618 			    htole64(dmamap->dm_segs[0].ds_addr);
   9619 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9620 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9621 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9622 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9623 			    htole32(fields);
   9624 			DPRINTF(sc, WM_DEBUG_TX,
   9625 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9626 				device_xname(sc->sc_dev), nexttx,
   9627 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9628 			DPRINTF(sc, WM_DEBUG_TX,
   9629 			    ("\t 0x%08x%08x\n", fields,
   9630 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9631 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9632 		}
   9633 
   9634 		lasttx = nexttx;
   9635 		nexttx = WM_NEXTTX(txq, nexttx);
   9636 		/*
   9637 		 * Fill in the next descriptors. Legacy or advanced format
   9638 		 * is the same here.
   9639 		 */
   9640 		for (seg = 1; seg < dmamap->dm_nsegs;
   9641 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9642 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9643 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9644 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9645 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9646 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9647 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9648 			lasttx = nexttx;
   9649 
   9650 			DPRINTF(sc, WM_DEBUG_TX,
   9651 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9652 				device_xname(sc->sc_dev), nexttx,
   9653 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9654 				dmamap->dm_segs[seg].ds_len));
   9655 		}
   9656 
   9657 		KASSERT(lasttx != -1);
   9658 
   9659 		/*
   9660 		 * Set up the command byte on the last descriptor of
   9661 		 * the packet. If we're in the interrupt delay window,
   9662 		 * delay the interrupt.
   9663 		 */
   9664 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9665 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9666 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9667 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9668 
   9669 		txs->txs_lastdesc = lasttx;
   9670 
   9671 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9672 		    device_xname(sc->sc_dev),
   9673 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9674 
   9675 		/* Sync the descriptors we're using. */
   9676 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9677 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9678 
   9679 		/* Give the packet to the chip. */
   9680 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9681 		sent = true;
   9682 
   9683 		DPRINTF(sc, WM_DEBUG_TX,
   9684 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9685 
   9686 		DPRINTF(sc, WM_DEBUG_TX,
   9687 		    ("%s: TX: finished transmitting packet, job %d\n",
   9688 			device_xname(sc->sc_dev), txq->txq_snext));
   9689 
   9690 		/* Advance the tx pointer. */
   9691 		txq->txq_free -= txs->txs_ndesc;
   9692 		txq->txq_next = nexttx;
   9693 
   9694 		txq->txq_sfree--;
   9695 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9696 
   9697 		/* Pass the packet to any BPF listeners. */
   9698 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9699 	}
   9700 
   9701 	if (m0 != NULL) {
   9702 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9703 		WM_Q_EVCNT_INCR(txq, descdrop);
   9704 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9705 			__func__));
   9706 		m_freem(m0);
   9707 	}
   9708 
   9709 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9710 		/* No more slots; notify upper layer. */
   9711 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9712 	}
   9713 
   9714 	if (sent) {
   9715 		/* Set a watchdog timer in case the chip flakes out. */
   9716 		txq->txq_lastsent = time_uptime;
   9717 		txq->txq_sending = true;
   9718 	}
   9719 }
   9720 
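         /*
          * wm_deferred_start_locked:
          *
          *	Resume transmission from softint or workqueue context. For
          *	queue 0 this also drains the if_snd queue (the ALTQ and
          *	single-CPU path); the *_transmit_locked() calls drain the
          *	per-queue pcq.
          */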
   9721 static void
   9722 wm_deferred_start_locked(struct wm_txqueue *txq)
   9723 {
   9724 	struct wm_softc *sc = txq->txq_sc;
   9725 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9726 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9727 	int qid = wmq->wmq_id;
   9728 
   9729 	KASSERT(mutex_owned(txq->txq_lock));
   9730 	KASSERT(!txq->txq_stopping);
   9731 
   9732 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9733 		/* XXX needed for ALTQ or single-CPU systems */
   9734 		if (qid == 0)
   9735 			wm_nq_start_locked(ifp);
   9736 		wm_nq_transmit_locked(ifp, txq);
   9737 	} else {
    9738 		/* XXX needed for ALTQ or single-CPU systems */
   9739 		if (qid == 0)
   9740 			wm_start_locked(ifp);
   9741 		wm_transmit_locked(ifp, txq);
   9742 	}
   9743 }
   9744 
   9745 /* Interrupt */
   9746 
   9747 /*
   9748  * wm_txeof:
   9749  *
   9750  *	Helper; handle transmit interrupts.
   9751  */
   9752 static bool
   9753 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9754 {
   9755 	struct wm_softc *sc = txq->txq_sc;
   9756 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9757 	struct wm_txsoft *txs;
   9758 	int count = 0;
   9759 	int i;
   9760 	uint8_t status;
   9761 	bool more = false;
   9762 
   9763 	KASSERT(mutex_owned(txq->txq_lock));
   9764 
   9765 	if (txq->txq_stopping)
   9766 		return false;
   9767 
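         	/* We are about to reclaim descriptors, so allow Tx again. */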
   9768 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9769 
   9770 	/*
   9771 	 * Go through the Tx list and free mbufs for those
   9772 	 * frames which have been transmitted.
   9773 	 */
   9774 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9775 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9776 		txs = &txq->txq_soft[i];
   9777 
   9778 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9779 			device_xname(sc->sc_dev), i));
   9780 
   9781 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9782 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9783 
   9784 		status =
   9785 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9786 		if ((status & WTX_ST_DD) == 0) {
   9787 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9788 			    BUS_DMASYNC_PREREAD);
   9789 			break;
   9790 		}
   9791 
   9792 		if (limit-- == 0) {
   9793 			more = true;
   9794 			DPRINTF(sc, WM_DEBUG_TX,
   9795 			    ("%s: TX: loop limited, job %d is not processed\n",
   9796 				device_xname(sc->sc_dev), i));
   9797 			break;
   9798 		}
   9799 
   9800 		count++;
   9801 		DPRINTF(sc, WM_DEBUG_TX,
   9802 		    ("%s: TX: job %d done: descs %d..%d\n",
   9803 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9804 		    txs->txs_lastdesc));
   9805 
   9806 #ifdef WM_EVENT_COUNTERS
   9807 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9808 			WM_Q_EVCNT_INCR(txq, underrun);
   9809 #endif /* WM_EVENT_COUNTERS */
   9810 
    9811 		/*
    9812 		 * The 82574 and newer datasheets say the status field has
    9813 		 * neither an EC (Excessive Collision) bit nor an LC (Late
    9814 		 * Collision) bit; both are reserved. See the "PCIe GbE
    9815 		 * Controller Open Source Software Developer's Manual".
    9816 		 *
    9817 		 * XXX The LC bit has been seen set on an I218 even though
    9818 		 * the media was full duplex, so the bit might have some
    9819 		 * other meaning (there is no documentation for it).
    9820 		 */
   9821 
   9822 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9823 		    && ((sc->sc_type < WM_T_82574)
   9824 			|| (sc->sc_type == WM_T_80003))) {
   9825 			if_statinc(ifp, if_oerrors);
   9826 			if (status & WTX_ST_LC)
   9827 				log(LOG_WARNING, "%s: late collision\n",
   9828 				    device_xname(sc->sc_dev));
   9829 			else if (status & WTX_ST_EC) {
   9830 				if_statadd(ifp, if_collisions,
   9831 				    TX_COLLISION_THRESHOLD + 1);
   9832 				log(LOG_WARNING, "%s: excessive collisions\n",
   9833 				    device_xname(sc->sc_dev));
   9834 			}
   9835 		} else
   9836 			if_statinc(ifp, if_opackets);
   9837 
   9838 		txq->txq_packets++;
   9839 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9840 
   9841 		txq->txq_free += txs->txs_ndesc;
   9842 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9843 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9844 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9845 		m_freem(txs->txs_mbuf);
   9846 		txs->txs_mbuf = NULL;
   9847 	}
   9848 
   9849 	/* Update the dirty transmit buffer pointer. */
   9850 	txq->txq_sdirty = i;
   9851 	DPRINTF(sc, WM_DEBUG_TX,
   9852 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9853 
   9854 	if (count != 0)
   9855 		rnd_add_uint32(&sc->rnd_source, count);
   9856 
   9857 	/*
   9858 	 * If there are no more pending transmissions, cancel the watchdog
   9859 	 * timer.
   9860 	 */
   9861 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9862 		txq->txq_sending = false;
   9863 
   9864 	return more;
   9865 }
   9866 
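         /*
          * Rx descriptor accessors.
          *
          * Three Rx descriptor formats are in use: the 82574 uses extended
          * descriptors, NEWQUEUE (82575 and newer) devices use advanced
          * descriptors and all other devices use legacy descriptors. These
          * inline helpers hide the format differences from wm_rxeof().
          */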
   9867 static inline uint32_t
   9868 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9869 {
   9870 	struct wm_softc *sc = rxq->rxq_sc;
   9871 
   9872 	if (sc->sc_type == WM_T_82574)
   9873 		return EXTRXC_STATUS(
   9874 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9875 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9876 		return NQRXC_STATUS(
   9877 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9878 	else
   9879 		return rxq->rxq_descs[idx].wrx_status;
   9880 }
   9881 
   9882 static inline uint32_t
   9883 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9884 {
   9885 	struct wm_softc *sc = rxq->rxq_sc;
   9886 
   9887 	if (sc->sc_type == WM_T_82574)
   9888 		return EXTRXC_ERROR(
   9889 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9890 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9891 		return NQRXC_ERROR(
   9892 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9893 	else
   9894 		return rxq->rxq_descs[idx].wrx_errors;
   9895 }
   9896 
   9897 static inline uint16_t
   9898 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9899 {
   9900 	struct wm_softc *sc = rxq->rxq_sc;
   9901 
   9902 	if (sc->sc_type == WM_T_82574)
   9903 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9904 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9905 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9906 	else
   9907 		return rxq->rxq_descs[idx].wrx_special;
   9908 }
   9909 
   9910 static inline int
   9911 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9912 {
   9913 	struct wm_softc *sc = rxq->rxq_sc;
   9914 
   9915 	if (sc->sc_type == WM_T_82574)
   9916 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9917 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9918 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9919 	else
   9920 		return rxq->rxq_descs[idx].wrx_len;
   9921 }
   9922 
   9923 #ifdef WM_DEBUG
   9924 static inline uint32_t
   9925 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9926 {
   9927 	struct wm_softc *sc = rxq->rxq_sc;
   9928 
   9929 	if (sc->sc_type == WM_T_82574)
   9930 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9931 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9932 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9933 	else
   9934 		return 0;
   9935 }
   9936 
   9937 static inline uint8_t
   9938 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9939 {
   9940 	struct wm_softc *sc = rxq->rxq_sc;
   9941 
   9942 	if (sc->sc_type == WM_T_82574)
   9943 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9944 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9945 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9946 	else
   9947 		return 0;
   9948 }
   9949 #endif /* WM_DEBUG */
   9950 
   9951 static inline bool
   9952 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9953     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9954 {
   9955 
   9956 	if (sc->sc_type == WM_T_82574)
   9957 		return (status & ext_bit) != 0;
   9958 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9959 		return (status & nq_bit) != 0;
   9960 	else
   9961 		return (status & legacy_bit) != 0;
   9962 }
   9963 
   9964 static inline bool
   9965 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9966     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9967 {
   9968 
   9969 	if (sc->sc_type == WM_T_82574)
   9970 		return (error & ext_bit) != 0;
   9971 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9972 		return (error & nq_bit) != 0;
   9973 	else
   9974 		return (error & legacy_bit) != 0;
   9975 }
   9976 
   9977 static inline bool
   9978 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9979 {
   9980 
   9981 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9982 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9983 		return true;
   9984 	else
   9985 		return false;
   9986 }
   9987 
   9988 static inline bool
   9989 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9990 {
   9991 	struct wm_softc *sc = rxq->rxq_sc;
   9992 
   9993 	/* XXX missing error bit for newqueue? */
   9994 	if (wm_rxdesc_is_set_error(sc, errors,
   9995 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9996 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9997 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9998 		NQRXC_ERROR_RXE)) {
   9999 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10000 		    EXTRXC_ERROR_SE, 0))
   10001 			log(LOG_WARNING, "%s: symbol error\n",
   10002 			    device_xname(sc->sc_dev));
   10003 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10004 		    EXTRXC_ERROR_SEQ, 0))
   10005 			log(LOG_WARNING, "%s: receive sequence error\n",
   10006 			    device_xname(sc->sc_dev));
   10007 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10008 		    EXTRXC_ERROR_CE, 0))
   10009 			log(LOG_WARNING, "%s: CRC error\n",
   10010 			    device_xname(sc->sc_dev));
   10011 		return true;
   10012 	}
   10013 
   10014 	return false;
   10015 }
   10016 
   10017 static inline bool
   10018 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10019 {
   10020 	struct wm_softc *sc = rxq->rxq_sc;
   10021 
   10022 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10023 		NQRXC_STATUS_DD)) {
   10024 		/* We have processed all of the receive descriptors. */
   10025 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10026 		return false;
   10027 	}
   10028 
   10029 	return true;
   10030 }
   10031 
   10032 static inline bool
   10033 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10034     uint16_t vlantag, struct mbuf *m)
   10035 {
   10036 
   10037 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10038 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10039 		vlan_set_tag(m, le16toh(vlantag));
   10040 	}
   10041 
   10042 	return true;
   10043 }
   10044 
   10045 static inline void
   10046 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10047     uint32_t errors, struct mbuf *m)
   10048 {
   10049 	struct wm_softc *sc = rxq->rxq_sc;
   10050 
   10051 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10052 		if (wm_rxdesc_is_set_status(sc, status,
   10053 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10054 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10055 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10056 			if (wm_rxdesc_is_set_error(sc, errors,
   10057 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10058 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10059 		}
   10060 		if (wm_rxdesc_is_set_status(sc, status,
   10061 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10062 			/*
   10063 			 * Note: we don't know if this was TCP or UDP,
   10064 			 * so we just set both bits, and expect the
   10065 			 * upper layers to deal.
   10066 			 */
   10067 			WM_Q_EVCNT_INCR(rxq, tusum);
   10068 			m->m_pkthdr.csum_flags |=
   10069 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10070 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10071 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10072 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10073 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10074 		}
   10075 	}
   10076 }
   10077 
   10078 /*
   10079  * wm_rxeof:
   10080  *
   10081  *	Helper; handle receive interrupts.
   10082  */
   10083 static bool
   10084 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10085 {
   10086 	struct wm_softc *sc = rxq->rxq_sc;
   10087 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10088 	struct wm_rxsoft *rxs;
   10089 	struct mbuf *m;
   10090 	int i, len;
   10091 	int count = 0;
   10092 	uint32_t status, errors;
   10093 	uint16_t vlantag;
   10094 	bool more = false;
   10095 
   10096 	KASSERT(mutex_owned(rxq->rxq_lock));
   10097 
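          	/*
          	 * Walk the ring from rxq_ptr, linking the fragments of a
          	 * multi-descriptor packet through rxq_head/rxq_tail, until a
          	 * descriptor's DD bit is clear or the limit is reached.
          	 */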
   10098 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10099 		rxs = &rxq->rxq_soft[i];
   10100 
   10101 		DPRINTF(sc, WM_DEBUG_RX,
   10102 		    ("%s: RX: checking descriptor %d\n",
   10103 			device_xname(sc->sc_dev), i));
   10104 		wm_cdrxsync(rxq, i,
   10105 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10106 
   10107 		status = wm_rxdesc_get_status(rxq, i);
   10108 		errors = wm_rxdesc_get_errors(rxq, i);
   10109 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10110 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10111 #ifdef WM_DEBUG
   10112 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10113 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10114 #endif
   10115 
   10116 		if (!wm_rxdesc_dd(rxq, i, status))
   10117 			break;
   10118 
   10119 		if (limit-- == 0) {
   10120 			more = true;
   10121 			DPRINTF(sc, WM_DEBUG_RX,
   10122 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10123 				device_xname(sc->sc_dev), i));
   10124 			break;
   10125 		}
   10126 
   10127 		count++;
   10128 		if (__predict_false(rxq->rxq_discard)) {
   10129 			DPRINTF(sc, WM_DEBUG_RX,
   10130 			    ("%s: RX: discarding contents of descriptor %d\n",
   10131 				device_xname(sc->sc_dev), i));
   10132 			wm_init_rxdesc(rxq, i);
   10133 			if (wm_rxdesc_is_eop(rxq, status)) {
   10134 				/* Reset our state. */
   10135 				DPRINTF(sc, WM_DEBUG_RX,
   10136 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10137 					device_xname(sc->sc_dev)));
   10138 				rxq->rxq_discard = 0;
   10139 			}
   10140 			continue;
   10141 		}
   10142 
   10143 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10144 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10145 
   10146 		m = rxs->rxs_mbuf;
   10147 
   10148 		/*
   10149 		 * Add a new receive buffer to the ring, unless of
   10150 		 * course the length is zero. Treat the latter as a
   10151 		 * failed mapping.
   10152 		 */
   10153 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10154 			/*
   10155 			 * Failed, throw away what we've done so
   10156 			 * far, and discard the rest of the packet.
   10157 			 */
   10158 			if_statinc(ifp, if_ierrors);
   10159 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10160 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10161 			wm_init_rxdesc(rxq, i);
   10162 			if (!wm_rxdesc_is_eop(rxq, status))
   10163 				rxq->rxq_discard = 1;
   10164 			if (rxq->rxq_head != NULL)
   10165 				m_freem(rxq->rxq_head);
   10166 			WM_RXCHAIN_RESET(rxq);
   10167 			DPRINTF(sc, WM_DEBUG_RX,
   10168 			    ("%s: RX: Rx buffer allocation failed, "
   10169 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10170 				rxq->rxq_discard ? " (discard)" : ""));
   10171 			continue;
   10172 		}
   10173 
   10174 		m->m_len = len;
   10175 		rxq->rxq_len += len;
   10176 		DPRINTF(sc, WM_DEBUG_RX,
   10177 		    ("%s: RX: buffer at %p len %d\n",
   10178 			device_xname(sc->sc_dev), m->m_data, len));
   10179 
   10180 		/* If this is not the end of the packet, keep looking. */
   10181 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10182 			WM_RXCHAIN_LINK(rxq, m);
   10183 			DPRINTF(sc, WM_DEBUG_RX,
   10184 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10185 				device_xname(sc->sc_dev), rxq->rxq_len));
   10186 			continue;
   10187 		}
   10188 
    10189 		/*
    10190 		 * Okay, we have the entire packet now. The chip is
    10191 		 * configured to include the FCS (not all chips can be
    10192 		 * configured to strip it), so we normally need to trim it.
    10193 		 * The exceptions are I35[04] and I21[01]: those have an
    10194 		 * erratum where the RCTL_SECRC bit in RCTL is always set,
    10195 		 * so we don't trim the FCS on them. PCH2 and newer chips
    10196 		 * also omit the FCS with jumbo frames, to work around an
    10197 		 * errata. We may need to adjust the length of the previous
    10198 		 * mbuf in the chain if the current mbuf is too short.
    10199 		 */
   10200 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10201 			if (m->m_len < ETHER_CRC_LEN) {
   10202 				rxq->rxq_tail->m_len
   10203 				    -= (ETHER_CRC_LEN - m->m_len);
   10204 				m->m_len = 0;
   10205 			} else
   10206 				m->m_len -= ETHER_CRC_LEN;
   10207 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10208 		} else
   10209 			len = rxq->rxq_len;
   10210 
   10211 		WM_RXCHAIN_LINK(rxq, m);
   10212 
   10213 		*rxq->rxq_tailp = NULL;
   10214 		m = rxq->rxq_head;
   10215 
   10216 		WM_RXCHAIN_RESET(rxq);
   10217 
   10218 		DPRINTF(sc, WM_DEBUG_RX,
   10219 		    ("%s: RX: have entire packet, len -> %d\n",
   10220 			device_xname(sc->sc_dev), len));
   10221 
   10222 		/* If an error occurred, update stats and drop the packet. */
   10223 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10224 			m_freem(m);
   10225 			continue;
   10226 		}
   10227 
   10228 		/* No errors.  Receive the packet. */
   10229 		m_set_rcvif(m, ifp);
   10230 		m->m_pkthdr.len = len;
    10231 		/*
    10232 		 * TODO
    10233 		 * The rsshash and rsstype should be saved to this mbuf.
    10234 		 */
   10235 		DPRINTF(sc, WM_DEBUG_RX,
   10236 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10237 			device_xname(sc->sc_dev), rsstype, rsshash));
   10238 
   10239 		/*
   10240 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10241 		 * for us.  Associate the tag with the packet.
   10242 		 */
   10243 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10244 			continue;
   10245 
   10246 		/* Set up checksum info for this packet. */
   10247 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10248 
   10249 		rxq->rxq_packets++;
   10250 		rxq->rxq_bytes += len;
   10251 		/* Pass it on. */
   10252 		if_percpuq_enqueue(sc->sc_ipq, m);
   10253 
   10254 		if (rxq->rxq_stopping)
   10255 			break;
   10256 	}
   10257 	rxq->rxq_ptr = i;
   10258 
   10259 	if (count != 0)
   10260 		rnd_add_uint32(&sc->rnd_source, count);
   10261 
   10262 	DPRINTF(sc, WM_DEBUG_RX,
   10263 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10264 
   10265 	return more;
   10266 }
   10267 
   10268 /*
   10269  * wm_linkintr_gmii:
   10270  *
   10271  *	Helper; handle link interrupts for GMII.
   10272  */
   10273 static void
   10274 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10275 {
   10276 	device_t dev = sc->sc_dev;
   10277 	uint32_t status, reg;
   10278 	bool link;
   10279 	int rv;
   10280 
   10281 	KASSERT(mutex_owned(sc->sc_core_lock));
   10282 
   10283 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10284 		__func__));
   10285 
   10286 	if ((icr & ICR_LSC) == 0) {
   10287 		if (icr & ICR_RXSEQ)
   10288 			DPRINTF(sc, WM_DEBUG_LINK,
   10289 			    ("%s: LINK Receive sequence error\n",
   10290 				device_xname(dev)));
   10291 		return;
   10292 	}
   10293 
   10294 	/* Link status changed */
   10295 	status = CSR_READ(sc, WMREG_STATUS);
   10296 	link = status & STATUS_LU;
   10297 	if (link) {
   10298 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10299 			device_xname(dev),
   10300 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10301 		if (wm_phy_need_linkdown_discard(sc)) {
   10302 			DPRINTF(sc, WM_DEBUG_LINK,
   10303 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10304 				device_xname(dev)));
   10305 			wm_clear_linkdown_discard(sc);
   10306 		}
   10307 	} else {
   10308 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10309 			device_xname(dev)));
   10310 		if (wm_phy_need_linkdown_discard(sc)) {
   10311 			DPRINTF(sc, WM_DEBUG_LINK,
   10312 			    ("%s: linkintr: Set linkdown discard flag\n",
   10313 				device_xname(dev)));
   10314 			wm_set_linkdown_discard(sc);
   10315 		}
   10316 	}
   10317 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10318 		wm_gig_downshift_workaround_ich8lan(sc);
   10319 
   10320 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10321 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10322 
   10323 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10324 		device_xname(dev)));
   10325 	mii_pollstat(&sc->sc_mii);
   10326 	if (sc->sc_type == WM_T_82543) {
   10327 		int miistatus, active;
   10328 
   10329 		/*
   10330 		 * With 82543, we need to force speed and
   10331 		 * duplex on the MAC equal to what the PHY
   10332 		 * speed and duplex configuration is.
   10333 		 */
   10334 		miistatus = sc->sc_mii.mii_media_status;
   10335 
   10336 		if (miistatus & IFM_ACTIVE) {
   10337 			active = sc->sc_mii.mii_media_active;
   10338 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10339 			switch (IFM_SUBTYPE(active)) {
   10340 			case IFM_10_T:
   10341 				sc->sc_ctrl |= CTRL_SPEED_10;
   10342 				break;
   10343 			case IFM_100_TX:
   10344 				sc->sc_ctrl |= CTRL_SPEED_100;
   10345 				break;
   10346 			case IFM_1000_T:
   10347 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10348 				break;
   10349 			default:
   10350 				/*
   10351 				 * Fiber?
    10352 				 * Should not enter here.
   10353 				 */
   10354 				device_printf(dev, "unknown media (%x)\n",
   10355 				    active);
   10356 				break;
   10357 			}
   10358 			if (active & IFM_FDX)
   10359 				sc->sc_ctrl |= CTRL_FD;
   10360 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10361 		}
   10362 	} else if (sc->sc_type == WM_T_PCH) {
   10363 		wm_k1_gig_workaround_hv(sc,
   10364 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10365 	}
   10366 
   10367 	/*
   10368 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10369 	 * aggressive resulting in many collisions. To avoid this, increase
   10370 	 * the IPG and reduce Rx latency in the PHY.
   10371 	 */
   10372 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10373 	    && link) {
   10374 		uint32_t tipg_reg;
   10375 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10376 		bool fdx;
   10377 		uint16_t emi_addr, emi_val;
   10378 
   10379 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10380 		tipg_reg &= ~TIPG_IPGT_MASK;
   10381 		fdx = status & STATUS_FD;
   10382 
   10383 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10384 			tipg_reg |= 0xff;
   10385 			/* Reduce Rx latency in analog PHY */
   10386 			emi_val = 0;
   10387 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10388 		    fdx && speed != STATUS_SPEED_1000) {
   10389 			tipg_reg |= 0xc;
   10390 			emi_val = 1;
   10391 		} else {
   10392 			/* Roll back the default values */
   10393 			tipg_reg |= 0x08;
   10394 			emi_val = 1;
   10395 		}
   10396 
   10397 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10398 
   10399 		rv = sc->phy.acquire(sc);
   10400 		if (rv)
   10401 			return;
   10402 
   10403 		if (sc->sc_type == WM_T_PCH2)
   10404 			emi_addr = I82579_RX_CONFIG;
   10405 		else
   10406 			emi_addr = I217_RX_CONFIG;
   10407 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10408 
   10409 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10410 			uint16_t phy_reg;
   10411 
   10412 			sc->phy.readreg_locked(dev, 2,
   10413 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10414 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10415 			if (speed == STATUS_SPEED_100
   10416 			    || speed == STATUS_SPEED_10)
   10417 				phy_reg |= 0x3e8;
   10418 			else
   10419 				phy_reg |= 0xfa;
   10420 			sc->phy.writereg_locked(dev, 2,
   10421 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10422 
   10423 			if (speed == STATUS_SPEED_1000) {
   10424 				sc->phy.readreg_locked(dev, 2,
   10425 				    HV_PM_CTRL, &phy_reg);
   10426 
   10427 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10428 
   10429 				sc->phy.writereg_locked(dev, 2,
   10430 				    HV_PM_CTRL, phy_reg);
   10431 			}
   10432 		}
   10433 		sc->phy.release(sc);
   10434 
   10435 		if (rv)
   10436 			return;
   10437 
   10438 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10439 			uint16_t data, ptr_gap;
   10440 
   10441 			if (speed == STATUS_SPEED_1000) {
   10442 				rv = sc->phy.acquire(sc);
   10443 				if (rv)
   10444 					return;
   10445 
   10446 				rv = sc->phy.readreg_locked(dev, 2,
   10447 				    I82579_UNKNOWN1, &data);
   10448 				if (rv) {
   10449 					sc->phy.release(sc);
   10450 					return;
   10451 				}
   10452 
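          				/*
          				 * Bits 11:2 of I82579_UNKNOWN1
          				 * apparently hold a buffer pointer
          				 * gap; enforce a minimum of 0x18.
          				 */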
   10453 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10454 				if (ptr_gap < 0x18) {
   10455 					data &= ~(0x3ff << 2);
   10456 					data |= (0x18 << 2);
   10457 					rv = sc->phy.writereg_locked(dev,
   10458 					    2, I82579_UNKNOWN1, data);
   10459 				}
   10460 				sc->phy.release(sc);
   10461 				if (rv)
   10462 					return;
   10463 			} else {
   10464 				rv = sc->phy.acquire(sc);
   10465 				if (rv)
   10466 					return;
   10467 
   10468 				rv = sc->phy.writereg_locked(dev, 2,
   10469 				    I82579_UNKNOWN1, 0xc023);
   10470 				sc->phy.release(sc);
   10471 				if (rv)
   10472 					return;
   10473 
   10474 			}
   10475 		}
   10476 	}
   10477 
   10478 	/*
   10479 	 * I217 Packet Loss issue:
   10480 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10481 	 * on power up.
   10482 	 * Set the Beacon Duration for I217 to 8 usec
   10483 	 */
   10484 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10485 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10486 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10487 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10488 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10489 	}
   10490 
   10491 	/* Work-around I218 hang issue */
   10492 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10493 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10494 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10495 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10496 		wm_k1_workaround_lpt_lp(sc, link);
   10497 
   10498 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10499 		/*
   10500 		 * Set platform power management values for Latency
   10501 		 * Tolerance Reporting (LTR)
   10502 		 */
   10503 		wm_platform_pm_pch_lpt(sc,
   10504 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10505 	}
   10506 
   10507 	/* Clear link partner's EEE ability */
   10508 	sc->eee_lp_ability = 0;
   10509 
   10510 	/* FEXTNVM6 K1-off workaround */
   10511 	if (sc->sc_type == WM_T_PCH_SPT) {
   10512 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10513 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10514 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10515 		else
   10516 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10517 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10518 	}
   10519 
   10520 	if (!link)
   10521 		return;
   10522 
   10523 	switch (sc->sc_type) {
   10524 	case WM_T_PCH2:
   10525 		wm_k1_workaround_lv(sc);
   10526 		/* FALLTHROUGH */
   10527 	case WM_T_PCH:
   10528 		if (sc->sc_phytype == WMPHY_82578)
   10529 			wm_link_stall_workaround_hv(sc);
   10530 		break;
   10531 	default:
   10532 		break;
   10533 	}
   10534 
   10535 	/* Enable/Disable EEE after link up */
   10536 	if (sc->sc_phytype > WMPHY_82579)
   10537 		wm_set_eee_pchlan(sc);
   10538 }
   10539 
   10540 /*
   10541  * wm_linkintr_tbi:
   10542  *
   10543  *	Helper; handle link interrupts for TBI mode.
   10544  */
   10545 static void
   10546 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10547 {
   10548 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10549 	uint32_t status;
   10550 
   10551 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10552 		__func__));
   10553 
   10554 	status = CSR_READ(sc, WMREG_STATUS);
   10555 	if (icr & ICR_LSC) {
   10556 		wm_check_for_link(sc);
   10557 		if (status & STATUS_LU) {
   10558 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10559 				device_xname(sc->sc_dev),
   10560 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10561 			/*
   10562 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10563 			 * so we should update sc->sc_ctrl
   10564 			 */
   10565 
   10566 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10567 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10568 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10569 			if (status & STATUS_FD)
   10570 				sc->sc_tctl |=
   10571 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10572 			else
   10573 				sc->sc_tctl |=
   10574 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10575 			if (sc->sc_ctrl & CTRL_TFCE)
   10576 				sc->sc_fcrtl |= FCRTL_XONE;
   10577 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10578 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10579 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10580 			sc->sc_tbi_linkup = 1;
   10581 			if_link_state_change(ifp, LINK_STATE_UP);
   10582 		} else {
   10583 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10584 				device_xname(sc->sc_dev)));
   10585 			sc->sc_tbi_linkup = 0;
   10586 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10587 		}
   10588 		/* Update LED */
   10589 		wm_tbi_serdes_set_linkled(sc);
   10590 	} else if (icr & ICR_RXSEQ)
   10591 		DPRINTF(sc, WM_DEBUG_LINK,
   10592 		    ("%s: LINK: Receive sequence error\n",
   10593 			device_xname(sc->sc_dev)));
   10594 }
   10595 
   10596 /*
   10597  * wm_linkintr_serdes:
   10598  *
    10599  *	Helper; handle link interrupts for SERDES mode.
   10600  */
   10601 static void
   10602 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10603 {
   10604 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10605 	struct mii_data *mii = &sc->sc_mii;
   10606 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10607 	uint32_t pcs_adv, pcs_lpab, reg;
   10608 
   10609 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10610 		__func__));
   10611 
   10612 	if (icr & ICR_LSC) {
   10613 		/* Check PCS */
   10614 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10615 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10616 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10617 				device_xname(sc->sc_dev)));
   10618 			mii->mii_media_status |= IFM_ACTIVE;
   10619 			sc->sc_tbi_linkup = 1;
   10620 			if_link_state_change(ifp, LINK_STATE_UP);
   10621 		} else {
   10622 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10623 				device_xname(sc->sc_dev)));
   10624 			mii->mii_media_status |= IFM_NONE;
   10625 			sc->sc_tbi_linkup = 0;
   10626 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10627 			wm_tbi_serdes_set_linkled(sc);
   10628 			return;
   10629 		}
   10630 		mii->mii_media_active |= IFM_1000_SX;
   10631 		if ((reg & PCS_LSTS_FDX) != 0)
   10632 			mii->mii_media_active |= IFM_FDX;
   10633 		else
   10634 			mii->mii_media_active |= IFM_HDX;
   10635 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10636 			/* Check flow */
   10637 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10638 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10639 				DPRINTF(sc, WM_DEBUG_LINK,
   10640 				    ("XXX LINKOK but not ACOMP\n"));
   10641 				return;
   10642 			}
   10643 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10644 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10645 			DPRINTF(sc, WM_DEBUG_LINK,
   10646 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10647 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10648 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10649 				mii->mii_media_active |= IFM_FLOW
   10650 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10651 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10652 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10653 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10654 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10655 				mii->mii_media_active |= IFM_FLOW
   10656 				    | IFM_ETH_TXPAUSE;
   10657 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10658 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10659 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10660 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10661 				mii->mii_media_active |= IFM_FLOW
   10662 				    | IFM_ETH_RXPAUSE;
   10663 		}
   10664 		/* Update LED */
   10665 		wm_tbi_serdes_set_linkled(sc);
   10666 	} else
   10667 		DPRINTF(sc, WM_DEBUG_LINK,
   10668 		    ("%s: LINK: Receive sequence error\n",
   10669 		    device_xname(sc->sc_dev)));
   10670 }
   10671 
   10672 /*
   10673  * wm_linkintr:
   10674  *
   10675  *	Helper; handle link interrupts.
   10676  */
   10677 static void
   10678 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10679 {
   10680 
   10681 	KASSERT(mutex_owned(sc->sc_core_lock));
   10682 
   10683 	if (sc->sc_flags & WM_F_HAS_MII)
   10684 		wm_linkintr_gmii(sc, icr);
   10685 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10686 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10687 		wm_linkintr_serdes(sc, icr);
   10688 	else
   10689 		wm_linkintr_tbi(sc, icr);
   10690 }
   10691 
   10692 
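          /*
           * wm_sched_handle_queue:
           *
           *	Defer Tx/Rx processing for a queue: enqueue the work on the
           *	per-device workqueue when workqueue mode is selected,
           *	otherwise schedule the queue's softint. The wmq_wq_enqueued
           *	flag prevents enqueueing the same work twice.
           */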
   10693 static inline void
   10694 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10695 {
   10696 
   10697 	if (wmq->wmq_txrx_use_workqueue) {
   10698 		if (!wmq->wmq_wq_enqueued) {
   10699 			wmq->wmq_wq_enqueued = true;
   10700 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10701 			    curcpu());
   10702 		}
   10703 	} else
   10704 		softint_schedule(wmq->wmq_si);
   10705 }
   10706 
   10707 static inline void
   10708 wm_legacy_intr_disable(struct wm_softc *sc)
   10709 {
   10710 
   10711 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10712 }
   10713 
   10714 static inline void
   10715 wm_legacy_intr_enable(struct wm_softc *sc)
   10716 {
   10717 
   10718 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10719 }
   10720 
   10721 /*
   10722  * wm_intr_legacy:
   10723  *
   10724  *	Interrupt service routine for INTx and MSI.
   10725  */
   10726 static int
   10727 wm_intr_legacy(void *arg)
   10728 {
   10729 	struct wm_softc *sc = arg;
   10730 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10731 	struct wm_queue *wmq = &sc->sc_queue[0];
   10732 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10733 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10734 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10735 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10736 	uint32_t icr, rndval = 0;
   10737 	bool more = false;
   10738 
   10739 	icr = CSR_READ(sc, WMREG_ICR);
   10740 	if ((icr & sc->sc_icr) == 0)
   10741 		return 0;
   10742 
   10743 	DPRINTF(sc, WM_DEBUG_TX,
    10744 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
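          	/* The ICR value doubles as an entropy sample for rnd(9). */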
   10745 	if (rndval == 0)
   10746 		rndval = icr;
   10747 
   10748 	mutex_enter(txq->txq_lock);
   10749 
   10750 	if (txq->txq_stopping) {
   10751 		mutex_exit(txq->txq_lock);
   10752 		return 1;
   10753 	}
   10754 
   10755 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10756 	if (icr & ICR_TXDW) {
   10757 		DPRINTF(sc, WM_DEBUG_TX,
   10758 		    ("%s: TX: got TXDW interrupt\n",
   10759 			device_xname(sc->sc_dev)));
   10760 		WM_Q_EVCNT_INCR(txq, txdw);
   10761 	}
   10762 #endif
   10763 	if (txlimit > 0) {
   10764 		more |= wm_txeof(txq, txlimit);
   10765 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10766 			more = true;
   10767 	} else
   10768 		more = true;
   10769 	mutex_exit(txq->txq_lock);
   10770 
   10771 	mutex_enter(rxq->rxq_lock);
   10772 
   10773 	if (rxq->rxq_stopping) {
   10774 		mutex_exit(rxq->rxq_lock);
   10775 		return 1;
   10776 	}
   10777 
   10778 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10779 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10780 		DPRINTF(sc, WM_DEBUG_RX,
   10781 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10782 			device_xname(sc->sc_dev),
   10783 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10784 		WM_Q_EVCNT_INCR(rxq, intr);
   10785 	}
   10786 #endif
   10787 	if (rxlimit > 0) {
    10788 		/*
    10789 		 * wm_rxeof() does *not* call upper layer functions directly;
    10790 		 * if_percpuq_enqueue() just calls softint_schedule(), so
    10791 		 * wm_rxeof() is safe to call in interrupt context.
    10792 		 */
    10793 		more |= wm_rxeof(rxq, rxlimit);
   10794 	} else
   10795 		more = true;
   10796 
   10797 	mutex_exit(rxq->rxq_lock);
   10798 
   10799 	mutex_enter(sc->sc_core_lock);
   10800 
   10801 	if (sc->sc_core_stopping) {
   10802 		mutex_exit(sc->sc_core_lock);
   10803 		return 1;
   10804 	}
   10805 
   10806 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10807 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10808 		wm_linkintr(sc, icr);
   10809 	}
   10810 	if ((icr & ICR_GPI(0)) != 0)
   10811 		device_printf(sc->sc_dev, "got module interrupt\n");
   10812 
   10813 	mutex_exit(sc->sc_core_lock);
   10814 
   10815 	if (icr & ICR_RXO) {
   10816 #if defined(WM_DEBUG)
   10817 		log(LOG_WARNING, "%s: Receive overrun\n",
   10818 		    device_xname(sc->sc_dev));
   10819 #endif /* defined(WM_DEBUG) */
   10820 	}
   10821 
   10822 	rnd_add_uint32(&sc->rnd_source, rndval);
   10823 
   10824 	if (more) {
   10825 		/* Try to get more packets going. */
   10826 		wm_legacy_intr_disable(sc);
   10827 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10828 		wm_sched_handle_queue(sc, wmq);
   10829 	}
   10830 
   10831 	return 1;
   10832 }
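
/*
 * Note: the handler above processes at most sc_tx_intr_process_limit Tx
 * and sc_rx_intr_process_limit Rx descriptors in hard interrupt context.
 * When a limit is hit (or set to 0), "more" becomes true, the interrupt
 * is masked, and the remaining work is deferred to softint or workqueue
 * context via wm_sched_handle_queue(); wm_handle_queue() re-enables the
 * interrupt once the rings are drained.
 */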
   10833 
   10834 static inline void
   10835 wm_txrxintr_disable(struct wm_queue *wmq)
   10836 {
   10837 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10838 
   10839 	if (__predict_false(!wm_is_using_msix(sc))) {
   10840 		wm_legacy_intr_disable(sc);
   10841 		return;
   10842 	}
   10843 
   10844 	if (sc->sc_type == WM_T_82574)
   10845 		CSR_WRITE(sc, WMREG_IMC,
   10846 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10847 	else if (sc->sc_type == WM_T_82575)
   10848 		CSR_WRITE(sc, WMREG_EIMC,
   10849 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10850 	else
   10851 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10852 }
   10853 
   10854 static inline void
   10855 wm_txrxintr_enable(struct wm_queue *wmq)
   10856 {
   10857 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10858 
   10859 	wm_itrs_calculate(sc, wmq);
   10860 
   10861 	if (__predict_false(!wm_is_using_msix(sc))) {
   10862 		wm_legacy_intr_enable(sc);
   10863 		return;
   10864 	}
   10865 
    10866 	/*
    10867 	 * Re-enable ICR_OTHER, which wm_linkintr_msix() disabled. It does
    10868 	 * not matter whether RXQ(0) or RXQ(1) re-enables ICR_OTHER first,
    10869 	 * because each RXQ/TXQ interrupt stays disabled while its
    10870 	 * wm_handle_queue(wmq) is running.
    10871 	 */
   10872 	if (sc->sc_type == WM_T_82574)
   10873 		CSR_WRITE(sc, WMREG_IMS,
   10874 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10875 	else if (sc->sc_type == WM_T_82575)
   10876 		CSR_WRITE(sc, WMREG_EIMS,
   10877 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10878 	else
   10879 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10880 }
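
/*
 * Note: three interrupt-mask layouts are handled above: the 82574 uses
 * IMS/IMC with per-queue ICR_TXQ()/ICR_RXQ() bits, the 82575 uses
 * EIMS/EIMC with EITR_TX_QUEUE()/EITR_RX_QUEUE() bits, and later MSI-X
 * devices use EIMS/EIMC with one bit per MSI-X vector (wmq_intr_idx).
 */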
   10881 
   10882 static int
   10883 wm_txrxintr_msix(void *arg)
   10884 {
   10885 	struct wm_queue *wmq = arg;
   10886 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10887 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10888 	struct wm_softc *sc = txq->txq_sc;
   10889 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10890 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10891 	bool txmore;
   10892 	bool rxmore;
   10893 
   10894 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10895 
   10896 	DPRINTF(sc, WM_DEBUG_TX,
   10897 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10898 
   10899 	wm_txrxintr_disable(wmq);
   10900 
   10901 	mutex_enter(txq->txq_lock);
   10902 
   10903 	if (txq->txq_stopping) {
   10904 		mutex_exit(txq->txq_lock);
   10905 		return 1;
   10906 	}
   10907 
   10908 	WM_Q_EVCNT_INCR(txq, txdw);
   10909 	if (txlimit > 0) {
   10910 		txmore = wm_txeof(txq, txlimit);
   10911 		/* wm_deferred start() is done in wm_handle_queue(). */
   10912 	} else
   10913 		txmore = true;
   10914 	mutex_exit(txq->txq_lock);
   10915 
   10916 	DPRINTF(sc, WM_DEBUG_RX,
   10917 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10918 	mutex_enter(rxq->rxq_lock);
   10919 
   10920 	if (rxq->rxq_stopping) {
   10921 		mutex_exit(rxq->rxq_lock);
   10922 		return 1;
   10923 	}
   10924 
   10925 	WM_Q_EVCNT_INCR(rxq, intr);
   10926 	if (rxlimit > 0) {
   10927 		rxmore = wm_rxeof(rxq, rxlimit);
   10928 	} else
   10929 		rxmore = true;
   10930 	mutex_exit(rxq->rxq_lock);
   10931 
   10932 	wm_itrs_writereg(sc, wmq);
   10933 
   10934 	if (txmore || rxmore) {
   10935 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10936 		wm_sched_handle_queue(sc, wmq);
   10937 	} else
   10938 		wm_txrxintr_enable(wmq);
   10939 
   10940 	return 1;
   10941 }
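
/*
 * Note: unlike the legacy handler, this per-queue MSI-X handler masks
 * only its own vector via wm_txrxintr_disable() and never takes the
 * core lock; link and other events arrive separately on
 * wm_linkintr_msix().
 */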
   10942 
   10943 static void
   10944 wm_handle_queue(void *arg)
   10945 {
   10946 	struct wm_queue *wmq = arg;
   10947 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10948 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10949 	struct wm_softc *sc = txq->txq_sc;
   10950 	u_int txlimit = sc->sc_tx_process_limit;
   10951 	u_int rxlimit = sc->sc_rx_process_limit;
   10952 	bool txmore;
   10953 	bool rxmore;
   10954 
   10955 	mutex_enter(txq->txq_lock);
   10956 	if (txq->txq_stopping) {
   10957 		mutex_exit(txq->txq_lock);
   10958 		return;
   10959 	}
   10960 	txmore = wm_txeof(txq, txlimit);
   10961 	wm_deferred_start_locked(txq);
   10962 	mutex_exit(txq->txq_lock);
   10963 
   10964 	mutex_enter(rxq->rxq_lock);
   10965 	if (rxq->rxq_stopping) {
   10966 		mutex_exit(rxq->rxq_lock);
   10967 		return;
   10968 	}
   10969 	WM_Q_EVCNT_INCR(rxq, defer);
   10970 	rxmore = wm_rxeof(rxq, rxlimit);
   10971 	mutex_exit(rxq->rxq_lock);
   10972 
   10973 	if (txmore || rxmore) {
   10974 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10975 		wm_sched_handle_queue(sc, wmq);
   10976 	} else
   10977 		wm_txrxintr_enable(wmq);
   10978 }
   10979 
   10980 static void
   10981 wm_handle_queue_work(struct work *wk, void *context)
   10982 {
   10983 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10984 
    10985 	/*
    10986 	 * Workaround for some QEMU environments, which don't stop the
    10987 	 * interrupt immediately.
    10988 	 */
   10989 	wmq->wmq_wq_enqueued = false;
   10990 	wm_handle_queue(wmq);
   10991 }
   10992 
   10993 /*
   10994  * wm_linkintr_msix:
   10995  *
   10996  *	Interrupt service routine for link status change for MSI-X.
   10997  */
   10998 static int
   10999 wm_linkintr_msix(void *arg)
   11000 {
   11001 	struct wm_softc *sc = arg;
   11002 	uint32_t reg;
    11003 	bool has_rxo = false;
   11004 
   11005 	reg = CSR_READ(sc, WMREG_ICR);
   11006 	mutex_enter(sc->sc_core_lock);
   11007 	DPRINTF(sc, WM_DEBUG_LINK,
   11008 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11009 		device_xname(sc->sc_dev), reg));
   11010 
   11011 	if (sc->sc_core_stopping)
   11012 		goto out;
   11013 
   11014 	if ((reg & ICR_LSC) != 0) {
   11015 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11016 		wm_linkintr(sc, ICR_LSC);
   11017 	}
   11018 	if ((reg & ICR_GPI(0)) != 0)
   11019 		device_printf(sc->sc_dev, "got module interrupt\n");
   11020 
   11021 	/*
   11022 	 * XXX 82574 MSI-X mode workaround
   11023 	 *
    11024 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    11025 	 * MSI-X vector and triggers neither the ICR_RXQ(0) nor the
    11026 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    11027 	 * interrupts by writing WMREG_ICS to process receive packets.
   11028 	 */
   11029 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11030 #if defined(WM_DEBUG)
   11031 		log(LOG_WARNING, "%s: Receive overrun\n",
   11032 		    device_xname(sc->sc_dev));
   11033 #endif /* defined(WM_DEBUG) */
   11034 
   11035 		has_rxo = true;
   11036 		/*
   11037 		 * The RXO interrupt is very high rate when receive traffic is
   11038 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   11039 		 * interrupts. ICR_OTHER will be enabled at the end of
   11040 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   11041 		 * ICR_RXQ(1) interrupts.
   11042 		 */
   11043 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11044 
   11045 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11046 	}
    11047 
   11050 out:
   11051 	mutex_exit(sc->sc_core_lock);
   11052 
   11053 	if (sc->sc_type == WM_T_82574) {
   11054 		if (!has_rxo)
   11055 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11056 		else
   11057 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11058 	} else if (sc->sc_type == WM_T_82575)
   11059 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11060 	else
   11061 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11062 
   11063 	return 1;
   11064 }
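
/*
 * Note: on the 82574, the final IMS write above deliberately leaves
 * ICR_OTHER masked while an RXO storm is being handled; it is re-enabled
 * from wm_txrxintr_enable() once the RXQ(0)/RXQ(1) handlers kicked off
 * by the WMREG_ICS write have drained the rings.
 */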
   11065 
   11066 /*
   11067  * Media related.
   11068  * GMII, SGMII, TBI (and SERDES)
   11069  */
   11070 
   11071 /* Common */
   11072 
   11073 /*
   11074  * wm_tbi_serdes_set_linkled:
   11075  *
   11076  *	Update the link LED on TBI and SERDES devices.
   11077  */
   11078 static void
   11079 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11080 {
   11081 
   11082 	if (sc->sc_tbi_linkup)
   11083 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11084 	else
   11085 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11086 
   11087 	/* 82540 or newer devices are active low */
   11088 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11089 
   11090 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11091 }
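
/*
 * Note: the XOR above inverts the LED bit only on the 82540 and newer,
 * where SWDPIN(0) drives the link LED active-low; on older chips the
 * pin is active-high and the value computed above is used as-is.
 */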
   11092 
   11093 /* GMII related */
   11094 
   11095 /*
   11096  * wm_gmii_reset:
   11097  *
   11098  *	Reset the PHY.
   11099  */
   11100 static void
   11101 wm_gmii_reset(struct wm_softc *sc)
   11102 {
   11103 	uint32_t reg;
   11104 	int rv;
   11105 
   11106 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11107 		device_xname(sc->sc_dev), __func__));
   11108 
   11109 	rv = sc->phy.acquire(sc);
   11110 	if (rv != 0) {
   11111 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11112 		    __func__);
   11113 		return;
   11114 	}
   11115 
   11116 	switch (sc->sc_type) {
   11117 	case WM_T_82542_2_0:
   11118 	case WM_T_82542_2_1:
   11119 		/* null */
   11120 		break;
   11121 	case WM_T_82543:
    11122 		/*
    11123 		 * With the 82543, we must force the MAC's speed and duplex
    11124 		 * to match the PHY's speed and duplex configuration. We also
    11125 		 * need to toggle the PHY's hardware reset pin to bring the
    11126 		 * PHY out of reset.
    11127 		 */
   11128 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11129 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11130 
   11131 		/* The PHY reset pin is active-low. */
   11132 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11133 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11134 		    CTRL_EXT_SWDPIN(4));
   11135 		reg |= CTRL_EXT_SWDPIO(4);
   11136 
   11137 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11138 		CSR_WRITE_FLUSH(sc);
   11139 		delay(10*1000);
   11140 
   11141 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11142 		CSR_WRITE_FLUSH(sc);
   11143 		delay(150);
   11144 #if 0
   11145 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11146 #endif
   11147 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11148 		break;
   11149 	case WM_T_82544:	/* Reset 10000us */
   11150 	case WM_T_82540:
   11151 	case WM_T_82545:
   11152 	case WM_T_82545_3:
   11153 	case WM_T_82546:
   11154 	case WM_T_82546_3:
   11155 	case WM_T_82541:
   11156 	case WM_T_82541_2:
   11157 	case WM_T_82547:
   11158 	case WM_T_82547_2:
   11159 	case WM_T_82571:	/* Reset 100us */
   11160 	case WM_T_82572:
   11161 	case WM_T_82573:
   11162 	case WM_T_82574:
   11163 	case WM_T_82575:
   11164 	case WM_T_82576:
   11165 	case WM_T_82580:
   11166 	case WM_T_I350:
   11167 	case WM_T_I354:
   11168 	case WM_T_I210:
   11169 	case WM_T_I211:
   11170 	case WM_T_82583:
   11171 	case WM_T_80003:
   11172 		/* Generic reset */
   11173 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11174 		CSR_WRITE_FLUSH(sc);
   11175 		delay(20000);
   11176 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11177 		CSR_WRITE_FLUSH(sc);
   11178 		delay(20000);
   11179 
   11180 		if ((sc->sc_type == WM_T_82541)
   11181 		    || (sc->sc_type == WM_T_82541_2)
   11182 		    || (sc->sc_type == WM_T_82547)
   11183 		    || (sc->sc_type == WM_T_82547_2)) {
    11184 			/* Workarounds for IGP are done in igp_reset() */
   11185 			/* XXX add code to set LED after phy reset */
   11186 		}
   11187 		break;
   11188 	case WM_T_ICH8:
   11189 	case WM_T_ICH9:
   11190 	case WM_T_ICH10:
   11191 	case WM_T_PCH:
   11192 	case WM_T_PCH2:
   11193 	case WM_T_PCH_LPT:
   11194 	case WM_T_PCH_SPT:
   11195 	case WM_T_PCH_CNP:
   11196 		/* Generic reset */
   11197 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11198 		CSR_WRITE_FLUSH(sc);
   11199 		delay(100);
   11200 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11201 		CSR_WRITE_FLUSH(sc);
   11202 		delay(150);
   11203 		break;
   11204 	default:
   11205 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11206 		    __func__);
   11207 		break;
   11208 	}
   11209 
   11210 	sc->phy.release(sc);
   11211 
   11212 	/* get_cfg_done */
   11213 	wm_get_cfg_done(sc);
   11214 
   11215 	/* Extra setup */
   11216 	switch (sc->sc_type) {
   11217 	case WM_T_82542_2_0:
   11218 	case WM_T_82542_2_1:
   11219 	case WM_T_82543:
   11220 	case WM_T_82544:
   11221 	case WM_T_82540:
   11222 	case WM_T_82545:
   11223 	case WM_T_82545_3:
   11224 	case WM_T_82546:
   11225 	case WM_T_82546_3:
   11226 	case WM_T_82541_2:
   11227 	case WM_T_82547_2:
   11228 	case WM_T_82571:
   11229 	case WM_T_82572:
   11230 	case WM_T_82573:
   11231 	case WM_T_82574:
   11232 	case WM_T_82583:
   11233 	case WM_T_82575:
   11234 	case WM_T_82576:
   11235 	case WM_T_82580:
   11236 	case WM_T_I350:
   11237 	case WM_T_I354:
   11238 	case WM_T_I210:
   11239 	case WM_T_I211:
   11240 	case WM_T_80003:
   11241 		/* Null */
   11242 		break;
   11243 	case WM_T_82541:
   11244 	case WM_T_82547:
    11245 		/* XXX Actively configure the LED after PHY reset */
   11246 		break;
   11247 	case WM_T_ICH8:
   11248 	case WM_T_ICH9:
   11249 	case WM_T_ICH10:
   11250 	case WM_T_PCH:
   11251 	case WM_T_PCH2:
   11252 	case WM_T_PCH_LPT:
   11253 	case WM_T_PCH_SPT:
   11254 	case WM_T_PCH_CNP:
   11255 		wm_phy_post_reset(sc);
   11256 		break;
   11257 	default:
   11258 		panic("%s: unknown type\n", __func__);
   11259 		break;
   11260 	}
   11261 }
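
/*
 * Note: the reset above is effectively a three-phase sequence on all
 * supported MACs: assert CTRL_PHY_RESET (or toggle the 82543's GPIO
 * reset pin), deassert it, wait via wm_get_cfg_done(), then apply the
 * per-family extra setup such as wm_phy_post_reset() on ICH/PCH.
 */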
   11262 
    11263 /*
    11264  * Set up sc_phytype and mii_{read|write}reg.
    11265  *
    11266  *  To identify the PHY type, the correct read/write functions must be
    11267  * selected, and selecting them requires the PCI ID or MAC type, since
    11268  * the PHY registers cannot be accessed yet.
    11269  *
    11270  *  On the first call of this function, the PHY ID is not known yet, so
    11271  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    11272  * so the result might be incorrect.
    11273  *
    11274  *  On the second call, the PHY OUI and model are used to identify the
    11275  * PHY type. This may still be imperfect because some entries are missing
    11276  * from the comparison tables, but it is more reliable than the first call.
    11277  *
    11278  *  If the newly detected result differs from the previous assumption,
    11279  * a diagnostic message is printed.
    11280  */
   11281 static void
   11282 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11283     uint16_t phy_model)
   11284 {
   11285 	device_t dev = sc->sc_dev;
   11286 	struct mii_data *mii = &sc->sc_mii;
   11287 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11288 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11289 	mii_readreg_t new_readreg;
   11290 	mii_writereg_t new_writereg;
   11291 	bool dodiag = true;
   11292 
   11293 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11294 		device_xname(sc->sc_dev), __func__));
   11295 
    11296 	/*
    11297 	 * 1000BASE-T SFP uses SGMII and the initially assumed PHY type is
    11298 	 * always incorrect, so don't print diagnostics on the second call.
    11299 	 */
   11300 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11301 		dodiag = false;
   11302 
   11303 	if (mii->mii_readreg == NULL) {
   11304 		/*
   11305 		 *  This is the first call of this function. For ICH and PCH
   11306 		 * variants, it's difficult to determine the PHY access method
   11307 		 * by sc_type, so use the PCI product ID for some devices.
   11308 		 */
   11309 
   11310 		switch (sc->sc_pcidevid) {
   11311 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11312 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11313 			/* 82577 */
   11314 			new_phytype = WMPHY_82577;
   11315 			break;
   11316 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11317 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11318 			/* 82578 */
   11319 			new_phytype = WMPHY_82578;
   11320 			break;
   11321 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11322 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11323 			/* 82579 */
   11324 			new_phytype = WMPHY_82579;
   11325 			break;
   11326 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11327 		case PCI_PRODUCT_INTEL_82801I_BM:
   11328 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11329 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11330 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11331 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11332 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11333 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11334 			/* ICH8, 9, 10 with 82567 */
   11335 			new_phytype = WMPHY_BM;
   11336 			break;
   11337 		default:
   11338 			break;
   11339 		}
   11340 	} else {
   11341 		/* It's not the first call. Use PHY OUI and model */
   11342 		switch (phy_oui) {
   11343 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11344 			switch (phy_model) {
   11345 			case MII_MODEL_ATTANSIC_AR8021:
   11346 				new_phytype = WMPHY_82578;
   11347 				break;
   11348 			default:
   11349 				break;
   11350 			}
   11351 			break;
   11352 		case MII_OUI_xxMARVELL:
   11353 			switch (phy_model) {
   11354 			case MII_MODEL_xxMARVELL_I210:
   11355 				new_phytype = WMPHY_I210;
   11356 				break;
   11357 			case MII_MODEL_xxMARVELL_E1011:
   11358 			case MII_MODEL_xxMARVELL_E1000_3:
   11359 			case MII_MODEL_xxMARVELL_E1000_5:
   11360 			case MII_MODEL_xxMARVELL_E1112:
   11361 				new_phytype = WMPHY_M88;
   11362 				break;
   11363 			case MII_MODEL_xxMARVELL_E1149:
   11364 				new_phytype = WMPHY_BM;
   11365 				break;
   11366 			case MII_MODEL_xxMARVELL_E1111:
   11367 			case MII_MODEL_xxMARVELL_I347:
   11368 			case MII_MODEL_xxMARVELL_E1512:
   11369 			case MII_MODEL_xxMARVELL_E1340M:
   11370 			case MII_MODEL_xxMARVELL_E1543:
   11371 				new_phytype = WMPHY_M88;
   11372 				break;
   11373 			case MII_MODEL_xxMARVELL_I82563:
   11374 				new_phytype = WMPHY_GG82563;
   11375 				break;
   11376 			default:
   11377 				break;
   11378 			}
   11379 			break;
   11380 		case MII_OUI_INTEL:
   11381 			switch (phy_model) {
   11382 			case MII_MODEL_INTEL_I82577:
   11383 				new_phytype = WMPHY_82577;
   11384 				break;
   11385 			case MII_MODEL_INTEL_I82579:
   11386 				new_phytype = WMPHY_82579;
   11387 				break;
   11388 			case MII_MODEL_INTEL_I217:
   11389 				new_phytype = WMPHY_I217;
   11390 				break;
   11391 			case MII_MODEL_INTEL_I82580:
   11392 				new_phytype = WMPHY_82580;
   11393 				break;
   11394 			case MII_MODEL_INTEL_I350:
   11395 				new_phytype = WMPHY_I350;
   11396 				break;
   11397 			default:
   11398 				break;
   11399 			}
   11400 			break;
   11401 		case MII_OUI_yyINTEL:
   11402 			switch (phy_model) {
   11403 			case MII_MODEL_yyINTEL_I82562G:
   11404 			case MII_MODEL_yyINTEL_I82562EM:
   11405 			case MII_MODEL_yyINTEL_I82562ET:
   11406 				new_phytype = WMPHY_IFE;
   11407 				break;
   11408 			case MII_MODEL_yyINTEL_IGP01E1000:
   11409 				new_phytype = WMPHY_IGP;
   11410 				break;
   11411 			case MII_MODEL_yyINTEL_I82566:
   11412 				new_phytype = WMPHY_IGP_3;
   11413 				break;
   11414 			default:
   11415 				break;
   11416 			}
   11417 			break;
   11418 		default:
   11419 			break;
   11420 		}
   11421 
   11422 		if (dodiag) {
   11423 			if (new_phytype == WMPHY_UNKNOWN)
   11424 				aprint_verbose_dev(dev,
   11425 				    "%s: Unknown PHY model. OUI=%06x, "
   11426 				    "model=%04x\n", __func__, phy_oui,
   11427 				    phy_model);
   11428 
   11429 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11430 			    && (sc->sc_phytype != new_phytype)) {
   11431 				aprint_error_dev(dev, "Previously assumed PHY "
    11432 				    "type(%u) was incorrect. PHY type from PHY "
   11433 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   11434 			}
   11435 		}
   11436 	}
   11437 
   11438 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11439 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11440 		/* SGMII */
   11441 		new_readreg = wm_sgmii_readreg;
   11442 		new_writereg = wm_sgmii_writereg;
   11443 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11444 		/* BM2 (phyaddr == 1) */
   11445 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11446 		    && (new_phytype != WMPHY_BM)
   11447 		    && (new_phytype != WMPHY_UNKNOWN))
   11448 			doubt_phytype = new_phytype;
   11449 		new_phytype = WMPHY_BM;
   11450 		new_readreg = wm_gmii_bm_readreg;
   11451 		new_writereg = wm_gmii_bm_writereg;
   11452 	} else if (sc->sc_type >= WM_T_PCH) {
   11453 		/* All PCH* use _hv_ */
   11454 		new_readreg = wm_gmii_hv_readreg;
   11455 		new_writereg = wm_gmii_hv_writereg;
   11456 	} else if (sc->sc_type >= WM_T_ICH8) {
   11457 		/* non-82567 ICH8, 9 and 10 */
   11458 		new_readreg = wm_gmii_i82544_readreg;
   11459 		new_writereg = wm_gmii_i82544_writereg;
   11460 	} else if (sc->sc_type >= WM_T_80003) {
   11461 		/* 80003 */
   11462 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11463 		    && (new_phytype != WMPHY_GG82563)
   11464 		    && (new_phytype != WMPHY_UNKNOWN))
   11465 			doubt_phytype = new_phytype;
   11466 		new_phytype = WMPHY_GG82563;
   11467 		new_readreg = wm_gmii_i80003_readreg;
   11468 		new_writereg = wm_gmii_i80003_writereg;
   11469 	} else if (sc->sc_type >= WM_T_I210) {
   11470 		/* I210 and I211 */
   11471 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11472 		    && (new_phytype != WMPHY_I210)
   11473 		    && (new_phytype != WMPHY_UNKNOWN))
   11474 			doubt_phytype = new_phytype;
   11475 		new_phytype = WMPHY_I210;
   11476 		new_readreg = wm_gmii_gs40g_readreg;
   11477 		new_writereg = wm_gmii_gs40g_writereg;
   11478 	} else if (sc->sc_type >= WM_T_82580) {
   11479 		/* 82580, I350 and I354 */
   11480 		new_readreg = wm_gmii_82580_readreg;
   11481 		new_writereg = wm_gmii_82580_writereg;
   11482 	} else if (sc->sc_type >= WM_T_82544) {
    11483 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11484 		new_readreg = wm_gmii_i82544_readreg;
   11485 		new_writereg = wm_gmii_i82544_writereg;
   11486 	} else {
   11487 		new_readreg = wm_gmii_i82543_readreg;
   11488 		new_writereg = wm_gmii_i82543_writereg;
   11489 	}
   11490 
   11491 	if (new_phytype == WMPHY_BM) {
   11492 		/* All BM use _bm_ */
   11493 		new_readreg = wm_gmii_bm_readreg;
   11494 		new_writereg = wm_gmii_bm_writereg;
   11495 	}
   11496 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11497 		/* All PCH* use _hv_ */
   11498 		new_readreg = wm_gmii_hv_readreg;
   11499 		new_writereg = wm_gmii_hv_writereg;
   11500 	}
   11501 
   11502 	/* Diag output */
   11503 	if (dodiag) {
   11504 		if (doubt_phytype != WMPHY_UNKNOWN)
   11505 			aprint_error_dev(dev, "Assumed new PHY type was "
   11506 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11507 			    new_phytype);
   11508 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11509 		    && (sc->sc_phytype != new_phytype))
    11510 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   11511 			    "was incorrect. New PHY type = %u\n",
   11512 			    sc->sc_phytype, new_phytype);
   11513 
   11514 		if ((mii->mii_readreg != NULL) &&
   11515 		    (new_phytype == WMPHY_UNKNOWN))
   11516 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11517 
   11518 		if ((mii->mii_readreg != NULL) &&
   11519 		    (mii->mii_readreg != new_readreg))
   11520 			aprint_error_dev(dev, "Previously assumed PHY "
   11521 			    "read/write function was incorrect.\n");
   11522 	}
   11523 
   11524 	/* Update now */
   11525 	sc->sc_phytype = new_phytype;
   11526 	mii->mii_readreg = new_readreg;
   11527 	mii->mii_writereg = new_writereg;
   11528 	if (new_readreg == wm_gmii_hv_readreg) {
   11529 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11530 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11531 	} else if (new_readreg == wm_sgmii_readreg) {
   11532 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11533 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11534 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11535 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11536 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11537 	}
   11538 }
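
/*
 * Note: this function is intentionally called twice. A rough sketch of
 * the calling pattern (the first call is assumed to happen at attach
 * time with the OUI/model still zero; the second is visible in
 * wm_gmii_mediainit() below):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	(guess from PCI ID/MAC type)
 *	mii_attach(...);			(probe with that guess)
 *	child = LIST_FIRST(&mii->mii_phys);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);		(refine from the PHY ID)
 */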
   11539 
   11540 /*
   11541  * wm_get_phy_id_82575:
   11542  *
    11543  * Return the PHY ID, or -1 on failure.
   11544  */
   11545 static int
   11546 wm_get_phy_id_82575(struct wm_softc *sc)
   11547 {
   11548 	uint32_t reg;
   11549 	int phyid = -1;
   11550 
   11551 	/* XXX */
   11552 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11553 		return -1;
   11554 
   11555 	if (wm_sgmii_uses_mdio(sc)) {
   11556 		switch (sc->sc_type) {
   11557 		case WM_T_82575:
   11558 		case WM_T_82576:
   11559 			reg = CSR_READ(sc, WMREG_MDIC);
   11560 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11561 			break;
   11562 		case WM_T_82580:
   11563 		case WM_T_I350:
   11564 		case WM_T_I354:
   11565 		case WM_T_I210:
   11566 		case WM_T_I211:
   11567 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11568 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11569 			break;
   11570 		default:
   11571 			return -1;
   11572 		}
   11573 	}
   11574 
   11575 	return phyid;
   11576 }
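
/*
 * Note: when SGMII runs over MDIO, the external PHY's address appears
 * to be pre-programmed (e.g. from the NVM) into the MDIC register on
 * the 82575/82576 and into MDICNFG on the 82580 and later, so it can
 * be recovered here without any MDIO traffic.
 */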
   11577 
   11578 /*
   11579  * wm_gmii_mediainit:
   11580  *
   11581  *	Initialize media for use on 1000BASE-T devices.
   11582  */
   11583 static void
   11584 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11585 {
   11586 	device_t dev = sc->sc_dev;
   11587 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11588 	struct mii_data *mii = &sc->sc_mii;
   11589 
   11590 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11591 		device_xname(sc->sc_dev), __func__));
   11592 
   11593 	/* We have GMII. */
   11594 	sc->sc_flags |= WM_F_HAS_MII;
   11595 
   11596 	if (sc->sc_type == WM_T_80003)
    11597 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11598 	else
   11599 		sc->sc_tipg = TIPG_1000T_DFLT;
   11600 
   11601 	/*
   11602 	 * Let the chip set speed/duplex on its own based on
   11603 	 * signals from the PHY.
   11604 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11605 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11606 	 */
   11607 	sc->sc_ctrl |= CTRL_SLU;
   11608 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11609 
   11610 	/* Initialize our media structures and probe the GMII. */
   11611 	mii->mii_ifp = ifp;
   11612 
   11613 	mii->mii_statchg = wm_gmii_statchg;
   11614 
   11615 	/* get PHY control from SMBus to PCIe */
   11616 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11617 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11618 	    || (sc->sc_type == WM_T_PCH_CNP))
   11619 		wm_init_phy_workarounds_pchlan(sc);
   11620 
   11621 	wm_gmii_reset(sc);
   11622 
   11623 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11624 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11625 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11626 
   11627 	/* Setup internal SGMII PHY for SFP */
   11628 	wm_sgmii_sfp_preconfig(sc);
   11629 
   11630 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11631 	    || (sc->sc_type == WM_T_82580)
   11632 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11633 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11634 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11635 			/* Attach only one port */
   11636 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11637 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11638 		} else {
   11639 			int i, id;
   11640 			uint32_t ctrl_ext;
   11641 
   11642 			id = wm_get_phy_id_82575(sc);
   11643 			if (id != -1) {
   11644 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11645 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11646 			}
   11647 			if ((id == -1)
   11648 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11649 				/* Power on sgmii phy if it is disabled */
   11650 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11651 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11652 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11653 				CSR_WRITE_FLUSH(sc);
   11654 				delay(300*1000); /* XXX too long */
   11655 
    11656 				/*
    11657 				 * Scan PHY addresses 1 through 7.
    11658 				 *
    11659 				 * I2C access can fail with the I2C
    11660 				 * register's ERROR bit set, so suppress
    11661 				 * error messages while scanning.
    11662 				 */
   11663 				sc->phy.no_errprint = true;
   11664 				for (i = 1; i < 8; i++)
   11665 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11666 					    0xffffffff, i, MII_OFFSET_ANY,
   11667 					    MIIF_DOPAUSE);
   11668 				sc->phy.no_errprint = false;
   11669 
   11670 				/* Restore previous sfp cage power state */
   11671 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11672 			}
   11673 		}
   11674 	} else
   11675 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11676 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11677 
   11678 	/*
   11679 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11680 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11681 	 */
   11682 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11683 		|| (sc->sc_type == WM_T_PCH_SPT)
   11684 		|| (sc->sc_type == WM_T_PCH_CNP))
   11685 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11686 		wm_set_mdio_slow_mode_hv(sc);
   11687 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11688 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11689 	}
   11690 
   11691 	/*
   11692 	 * (For ICH8 variants)
   11693 	 * If PHY detection failed, use BM's r/w function and retry.
   11694 	 */
   11695 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11696 		/* if failed, retry with *_bm_* */
   11697 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11698 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11699 		    sc->sc_phytype);
   11700 		sc->sc_phytype = WMPHY_BM;
   11701 		mii->mii_readreg = wm_gmii_bm_readreg;
   11702 		mii->mii_writereg = wm_gmii_bm_writereg;
   11703 
   11704 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11705 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11706 	}
   11707 
   11708 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11709 		/* No PHY was found */
   11710 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11711 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11712 		sc->sc_phytype = WMPHY_NONE;
   11713 	} else {
   11714 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11715 
   11716 		/*
   11717 		 * PHY found! Check PHY type again by the second call of
   11718 		 * wm_gmii_setup_phytype.
   11719 		 */
   11720 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11721 		    child->mii_mpd_model);
   11722 
   11723 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11724 	}
   11725 }
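
/*
 * Note: PHY probing above is a ladder of fallbacks: a normal
 * mii_attach(), then (for PCH2 and later) a retry in MDIO slow mode,
 * then a retry with the BM access functions, and finally IFM_NONE if
 * nothing answered. Each retry reruns mii_attach() with the adjusted
 * access functions.
 */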
   11726 
   11727 /*
   11728  * wm_gmii_mediachange:	[ifmedia interface function]
   11729  *
   11730  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11731  */
   11732 static int
   11733 wm_gmii_mediachange(struct ifnet *ifp)
   11734 {
   11735 	struct wm_softc *sc = ifp->if_softc;
   11736 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11737 	uint32_t reg;
   11738 	int rc;
   11739 
   11740 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11741 		device_xname(sc->sc_dev), __func__));
   11742 
   11743 	KASSERT(mutex_owned(sc->sc_core_lock));
   11744 
   11745 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11746 		return 0;
   11747 
   11748 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11749 	if ((sc->sc_type == WM_T_82580)
   11750 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11751 	    || (sc->sc_type == WM_T_I211)) {
   11752 		reg = CSR_READ(sc, WMREG_PHPM);
   11753 		reg &= ~PHPM_GO_LINK_D;
   11754 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11755 	}
   11756 
   11757 	/* Disable D0 LPLU. */
   11758 	wm_lplu_d0_disable(sc);
   11759 
   11760 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11761 	sc->sc_ctrl |= CTRL_SLU;
   11762 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11763 	    || (sc->sc_type > WM_T_82543)) {
   11764 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11765 	} else {
   11766 		sc->sc_ctrl &= ~CTRL_ASDE;
   11767 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11768 		if (ife->ifm_media & IFM_FDX)
   11769 			sc->sc_ctrl |= CTRL_FD;
   11770 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11771 		case IFM_10_T:
   11772 			sc->sc_ctrl |= CTRL_SPEED_10;
   11773 			break;
   11774 		case IFM_100_TX:
   11775 			sc->sc_ctrl |= CTRL_SPEED_100;
   11776 			break;
   11777 		case IFM_1000_T:
   11778 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11779 			break;
   11780 		case IFM_NONE:
   11781 			/* There is no specific setting for IFM_NONE */
   11782 			break;
   11783 		default:
   11784 			panic("wm_gmii_mediachange: bad media 0x%x",
   11785 			    ife->ifm_media);
   11786 		}
   11787 	}
   11788 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11789 	CSR_WRITE_FLUSH(sc);
   11790 
   11791 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11792 		wm_serdes_mediachange(ifp);
   11793 
   11794 	if (sc->sc_type <= WM_T_82543)
   11795 		wm_gmii_reset(sc);
   11796 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11797 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11798 		/* Allow time for the SFP cage to power up the PHY */
   11799 		delay(300 * 1000);
   11800 		wm_gmii_reset(sc);
   11801 	}
   11802 
   11803 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11804 		return 0;
   11805 	return rc;
   11806 }
   11807 
   11808 /*
   11809  * wm_gmii_mediastatus:	[ifmedia interface function]
   11810  *
   11811  *	Get the current interface media status on a 1000BASE-T device.
   11812  */
   11813 static void
   11814 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11815 {
   11816 	struct wm_softc *sc = ifp->if_softc;
   11817 
   11818 	KASSERT(mutex_owned(sc->sc_core_lock));
   11819 
   11820 	ether_mediastatus(ifp, ifmr);
   11821 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11822 	    | sc->sc_flowflags;
   11823 }
   11824 
   11825 #define	MDI_IO		CTRL_SWDPIN(2)
   11826 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11827 #define	MDI_CLK		CTRL_SWDPIN(3)
   11828 
   11829 static void
   11830 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11831 {
   11832 	uint32_t i, v;
   11833 
   11834 	v = CSR_READ(sc, WMREG_CTRL);
   11835 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11836 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11837 
   11838 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11839 		if (data & i)
   11840 			v |= MDI_IO;
   11841 		else
   11842 			v &= ~MDI_IO;
   11843 		CSR_WRITE(sc, WMREG_CTRL, v);
   11844 		CSR_WRITE_FLUSH(sc);
   11845 		delay(10);
   11846 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11847 		CSR_WRITE_FLUSH(sc);
   11848 		delay(10);
   11849 		CSR_WRITE(sc, WMREG_CTRL, v);
   11850 		CSR_WRITE_FLUSH(sc);
   11851 		delay(10);
   11852 	}
   11853 }
   11854 
   11855 static uint16_t
   11856 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11857 {
   11858 	uint32_t v, i;
   11859 	uint16_t data = 0;
   11860 
   11861 	v = CSR_READ(sc, WMREG_CTRL);
   11862 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11863 	v |= CTRL_SWDPIO(3);
   11864 
   11865 	CSR_WRITE(sc, WMREG_CTRL, v);
   11866 	CSR_WRITE_FLUSH(sc);
   11867 	delay(10);
   11868 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11869 	CSR_WRITE_FLUSH(sc);
   11870 	delay(10);
   11871 	CSR_WRITE(sc, WMREG_CTRL, v);
   11872 	CSR_WRITE_FLUSH(sc);
   11873 	delay(10);
   11874 
   11875 	for (i = 0; i < 16; i++) {
   11876 		data <<= 1;
   11877 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11878 		CSR_WRITE_FLUSH(sc);
   11879 		delay(10);
   11880 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11881 			data |= 1;
   11882 		CSR_WRITE(sc, WMREG_CTRL, v);
   11883 		CSR_WRITE_FLUSH(sc);
   11884 		delay(10);
   11885 	}
   11886 
   11887 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11888 	CSR_WRITE_FLUSH(sc);
   11889 	delay(10);
   11890 	CSR_WRITE(sc, WMREG_CTRL, v);
   11891 	CSR_WRITE_FLUSH(sc);
   11892 	delay(10);
   11893 
   11894 	return data;
   11895 }
   11896 
   11897 #undef MDI_IO
   11898 #undef MDI_DIR
   11899 #undef MDI_CLK
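
/*
 * Note: the helpers above bit-bang a standard IEEE 802.3 clause 22 MDIO
 * frame on two software-controlled pins. For a read, the 14 bits sent
 * after the 32-bit preamble of ones decode as (MSB first):
 *
 *	bits 13:12	ST = 01	(MII_COMMAND_START)
 *	bits 11:10	OP = 10	(MII_COMMAND_READ)
 *	bits  9:5	PHYAD	(phy << 5)
 *	bits  4:0	REGAD	(reg)
 *
 * For example, reading register 2 of PHY 1 sends 01 10 00001 00010; the
 * turnaround and 16 data bits are then clocked in by
 * wm_i82543_mii_recvbits().
 */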
   11900 
   11901 /*
   11902  * wm_gmii_i82543_readreg:	[mii interface function]
   11903  *
   11904  *	Read a PHY register on the GMII (i82543 version).
   11905  */
   11906 static int
   11907 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11908 {
   11909 	struct wm_softc *sc = device_private(dev);
   11910 
   11911 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11912 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11913 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11914 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11915 
   11916 	DPRINTF(sc, WM_DEBUG_GMII,
   11917 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11918 		device_xname(dev), phy, reg, *val));
   11919 
   11920 	return 0;
   11921 }
   11922 
   11923 /*
   11924  * wm_gmii_i82543_writereg:	[mii interface function]
   11925  *
   11926  *	Write a PHY register on the GMII (i82543 version).
   11927  */
   11928 static int
   11929 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11930 {
   11931 	struct wm_softc *sc = device_private(dev);
   11932 
   11933 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11934 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11935 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11936 	    (MII_COMMAND_START << 30), 32);
   11937 
   11938 	return 0;
   11939 }
   11940 
   11941 /*
   11942  * wm_gmii_mdic_readreg:	[mii interface function]
   11943  *
   11944  *	Read a PHY register on the GMII.
   11945  */
   11946 static int
   11947 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11948 {
   11949 	struct wm_softc *sc = device_private(dev);
   11950 	uint32_t mdic = 0;
   11951 	int i;
   11952 
   11953 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11954 	    && (reg > MII_ADDRMASK)) {
   11955 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11956 		    __func__, sc->sc_phytype, reg);
   11957 		reg &= MII_ADDRMASK;
   11958 	}
   11959 
   11960 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11961 	    MDIC_REGADD(reg));
   11962 
   11963 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11964 		delay(50);
   11965 		mdic = CSR_READ(sc, WMREG_MDIC);
   11966 		if (mdic & MDIC_READY)
   11967 			break;
   11968 	}
   11969 
   11970 	if ((mdic & MDIC_READY) == 0) {
   11971 		DPRINTF(sc, WM_DEBUG_GMII,
   11972 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11973 			device_xname(dev), phy, reg));
   11974 		return ETIMEDOUT;
   11975 	} else if (mdic & MDIC_E) {
   11976 		/* This is normal if no PHY is present. */
   11977 		DPRINTF(sc, WM_DEBUG_GMII,
   11978 		    ("%s: MDIC read error: phy %d reg %d\n",
   11979 			device_xname(sc->sc_dev), phy, reg));
   11980 		return -1;
   11981 	} else
   11982 		*val = MDIC_DATA(mdic);
   11983 
   11984 	/*
   11985 	 * Allow some time after each MDIC transaction to avoid
   11986 	 * reading duplicate data in the next MDIC transaction.
   11987 	 */
   11988 	if (sc->sc_type == WM_T_PCH2)
   11989 		delay(100);
   11990 
   11991 	return 0;
   11992 }
   11993 
   11994 /*
   11995  * wm_gmii_mdic_writereg:	[mii interface function]
   11996  *
   11997  *	Write a PHY register on the GMII.
   11998  */
   11999 static int
   12000 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12001 {
   12002 	struct wm_softc *sc = device_private(dev);
   12003 	uint32_t mdic = 0;
   12004 	int i;
   12005 
   12006 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12007 	    && (reg > MII_ADDRMASK)) {
   12008 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12009 		    __func__, sc->sc_phytype, reg);
   12010 		reg &= MII_ADDRMASK;
   12011 	}
   12012 
   12013 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12014 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12015 
   12016 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12017 		delay(50);
   12018 		mdic = CSR_READ(sc, WMREG_MDIC);
   12019 		if (mdic & MDIC_READY)
   12020 			break;
   12021 	}
   12022 
   12023 	if ((mdic & MDIC_READY) == 0) {
   12024 		DPRINTF(sc, WM_DEBUG_GMII,
   12025 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12026 			device_xname(dev), phy, reg));
   12027 		return ETIMEDOUT;
   12028 	} else if (mdic & MDIC_E) {
   12029 		DPRINTF(sc, WM_DEBUG_GMII,
   12030 		    ("%s: MDIC write error: phy %d reg %d\n",
   12031 			device_xname(dev), phy, reg));
   12032 		return -1;
   12033 	}
   12034 
   12035 	/*
   12036 	 * Allow some time after each MDIC transaction to avoid
   12037 	 * reading duplicate data in the next MDIC transaction.
   12038 	 */
   12039 	if (sc->sc_type == WM_T_PCH2)
   12040 		delay(100);
   12041 
   12042 	return 0;
   12043 }
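
/*
 * Note: both MDIC accessors above poll MDIC_READY in 50us steps for up
 * to WM_GEN_POLL_TIMEOUT * 3 iterations rather than using an interrupt.
 * MDIC_E distinguishes "no PHY at this address" (normal while scanning
 * the bus) from a timeout, which is why only the timeout case returns
 * ETIMEDOUT.
 */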
   12044 
   12045 /*
   12046  * wm_gmii_i82544_readreg:	[mii interface function]
   12047  *
   12048  *	Read a PHY register on the GMII.
   12049  */
   12050 static int
   12051 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12052 {
   12053 	struct wm_softc *sc = device_private(dev);
   12054 	int rv;
   12055 
   12056 	rv = sc->phy.acquire(sc);
   12057 	if (rv != 0) {
   12058 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12059 		return rv;
   12060 	}
   12061 
   12062 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12063 
   12064 	sc->phy.release(sc);
   12065 
   12066 	return rv;
   12067 }
   12068 
   12069 static int
   12070 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12071 {
   12072 	struct wm_softc *sc = device_private(dev);
   12073 	int rv;
   12074 
   12075 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12076 		switch (sc->sc_phytype) {
   12077 		case WMPHY_IGP:
   12078 		case WMPHY_IGP_2:
   12079 		case WMPHY_IGP_3:
   12080 			rv = wm_gmii_mdic_writereg(dev, phy,
   12081 			    IGPHY_PAGE_SELECT, reg);
   12082 			if (rv != 0)
   12083 				return rv;
   12084 			break;
   12085 		default:
   12086 #ifdef WM_DEBUG
   12087 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   12088 			    __func__, sc->sc_phytype, reg);
   12089 #endif
   12090 			break;
   12091 		}
   12092 	}
   12093 
   12094 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12095 }
   12096 
   12097 /*
   12098  * wm_gmii_i82544_writereg:	[mii interface function]
   12099  *
   12100  *	Write a PHY register on the GMII.
   12101  */
   12102 static int
   12103 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12104 {
   12105 	struct wm_softc *sc = device_private(dev);
   12106 	int rv;
   12107 
   12108 	rv = sc->phy.acquire(sc);
   12109 	if (rv != 0) {
   12110 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12111 		return rv;
   12112 	}
   12113 
   12114 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12115 	sc->phy.release(sc);
   12116 
   12117 	return rv;
   12118 }
   12119 
   12120 static int
   12121 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12122 {
   12123 	struct wm_softc *sc = device_private(dev);
   12124 	int rv;
   12125 
   12126 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12127 		switch (sc->sc_phytype) {
   12128 		case WMPHY_IGP:
   12129 		case WMPHY_IGP_2:
   12130 		case WMPHY_IGP_3:
   12131 			rv = wm_gmii_mdic_writereg(dev, phy,
   12132 			    IGPHY_PAGE_SELECT, reg);
   12133 			if (rv != 0)
   12134 				return rv;
   12135 			break;
   12136 		default:
   12137 #ifdef WM_DEBUG
    12138 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   12139 			    __func__, sc->sc_phytype, reg);
   12140 #endif
   12141 			break;
   12142 		}
   12143 	}
   12144 
   12145 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12146 }
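
/*
 * Note: for the IGP PHY family, register numbers above
 * BME1000_MAX_MULTI_PAGE_REG carry page-select information in their
 * upper bits: the full value is written to IGPHY_PAGE_SELECT first, and
 * the low bits (reg & MII_ADDRMASK) then address the register within
 * that page.
 */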
   12147 
   12148 /*
   12149  * wm_gmii_i80003_readreg:	[mii interface function]
   12150  *
    12151  *	Read a PHY register on the Kumeran interface.
   12152  * This could be handled by the PHY layer if we didn't have to lock the
   12153  * resource ...
   12154  */
   12155 static int
   12156 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12157 {
   12158 	struct wm_softc *sc = device_private(dev);
   12159 	int page_select;
   12160 	uint16_t temp, temp2;
   12161 	int rv;
   12162 
   12163 	if (phy != 1) /* Only one PHY on kumeran bus */
   12164 		return -1;
   12165 
   12166 	rv = sc->phy.acquire(sc);
   12167 	if (rv != 0) {
   12168 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12169 		return rv;
   12170 	}
   12171 
   12172 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12173 		page_select = GG82563_PHY_PAGE_SELECT;
   12174 	else {
   12175 		/*
   12176 		 * Use Alternative Page Select register to access registers
   12177 		 * 30 and 31.
   12178 		 */
   12179 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12180 	}
   12181 	temp = reg >> GG82563_PAGE_SHIFT;
   12182 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12183 		goto out;
   12184 
   12185 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12186 		/*
    12187 		 * Wait another 200us to work around a bug with the ready
    12188 		 * bit in the MDIC register.
   12189 		 */
   12190 		delay(200);
   12191 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12192 		if ((rv != 0) || (temp2 != temp)) {
   12193 			device_printf(dev, "%s failed\n", __func__);
   12194 			rv = -1;
   12195 			goto out;
   12196 		}
   12197 		delay(200);
   12198 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12199 		delay(200);
   12200 	} else
   12201 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12202 
   12203 out:
   12204 	sc->phy.release(sc);
   12205 	return rv;
   12206 }
   12207 
   12208 /*
   12209  * wm_gmii_i80003_writereg:	[mii interface function]
   12210  *
    12211  *	Write a PHY register on the Kumeran interface.
   12212  * This could be handled by the PHY layer if we didn't have to lock the
   12213  * resource ...
   12214  */
   12215 static int
   12216 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12217 {
   12218 	struct wm_softc *sc = device_private(dev);
   12219 	int page_select, rv;
   12220 	uint16_t temp, temp2;
   12221 
   12222 	if (phy != 1) /* Only one PHY on kumeran bus */
   12223 		return -1;
   12224 
   12225 	rv = sc->phy.acquire(sc);
   12226 	if (rv != 0) {
   12227 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12228 		return rv;
   12229 	}
   12230 
   12231 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12232 		page_select = GG82563_PHY_PAGE_SELECT;
   12233 	else {
   12234 		/*
   12235 		 * Use Alternative Page Select register to access registers
   12236 		 * 30 and 31.
   12237 		 */
   12238 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12239 	}
   12240 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12241 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12242 		goto out;
   12243 
   12244 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12245 		/*
    12246 		 * Wait another 200us to work around a bug with the ready
    12247 		 * bit in the MDIC register.
   12248 		 */
   12249 		delay(200);
   12250 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12251 		if ((rv != 0) || (temp2 != temp)) {
   12252 			device_printf(dev, "%s failed\n", __func__);
   12253 			rv = -1;
   12254 			goto out;
   12255 		}
   12256 		delay(200);
   12257 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12258 		delay(200);
   12259 	} else
   12260 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12261 
   12262 out:
   12263 	sc->phy.release(sc);
   12264 	return rv;
   12265 }
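
/*
 * Note: the 80003's GG82563 PHY has two page-select registers; the
 * alternative one is required to reach registers 30 and 31
 * (GG82563_MIN_ALT_REG and above). Under WM_F_80003_MDIC_WA, the
 * page-select value is read back to verify that the write really landed
 * despite the MDIC ready-bit bug.
 */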
   12266 
   12267 /*
   12268  * wm_gmii_bm_readreg:	[mii interface function]
   12269  *
    12270  *	Read a PHY register on the BM PHY.
   12271  * This could be handled by the PHY layer if we didn't have to lock the
   12272  * resource ...
   12273  */
   12274 static int
   12275 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12276 {
   12277 	struct wm_softc *sc = device_private(dev);
   12278 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12279 	int rv;
   12280 
   12281 	rv = sc->phy.acquire(sc);
   12282 	if (rv != 0) {
   12283 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12284 		return rv;
   12285 	}
   12286 
   12287 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12288 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12289 		    || (reg == 31)) ? 1 : phy;
   12290 	/* Page 800 works differently than the rest so it has its own func */
   12291 	if (page == BM_WUC_PAGE) {
   12292 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12293 		goto release;
   12294 	}
   12295 
   12296 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12297 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12298 		    && (sc->sc_type != WM_T_82583))
   12299 			rv = wm_gmii_mdic_writereg(dev, phy,
   12300 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12301 		else
   12302 			rv = wm_gmii_mdic_writereg(dev, phy,
   12303 			    BME1000_PHY_PAGE_SELECT, page);
   12304 		if (rv != 0)
   12305 			goto release;
   12306 	}
   12307 
   12308 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12309 
   12310 release:
   12311 	sc->phy.release(sc);
   12312 	return rv;
   12313 }
   12314 
   12315 /*
   12316  * wm_gmii_bm_writereg:	[mii interface function]
   12317  *
    12318  *	Write a PHY register on the BM PHY.
   12319  * This could be handled by the PHY layer if we didn't have to lock the
   12320  * resource ...
   12321  */
   12322 static int
   12323 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12324 {
   12325 	struct wm_softc *sc = device_private(dev);
   12326 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12327 	int rv;
   12328 
   12329 	rv = sc->phy.acquire(sc);
   12330 	if (rv != 0) {
   12331 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12332 		return rv;
   12333 	}
   12334 
   12335 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12336 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12337 		    || (reg == 31)) ? 1 : phy;
   12338 	/* Page 800 works differently than the rest so it has its own func */
   12339 	if (page == BM_WUC_PAGE) {
   12340 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12341 		goto release;
   12342 	}
   12343 
   12344 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12345 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12346 		    && (sc->sc_type != WM_T_82583))
   12347 			rv = wm_gmii_mdic_writereg(dev, phy,
   12348 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12349 		else
   12350 			rv = wm_gmii_mdic_writereg(dev, phy,
   12351 			    BME1000_PHY_PAGE_SELECT, page);
   12352 		if (rv != 0)
   12353 			goto release;
   12354 	}
   12355 
   12356 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12357 
   12358 release:
   12359 	sc->phy.release(sc);
   12360 	return rv;
   12361 }
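
/*
 * Note: BM PHY register numbers are encoded as
 * (page << BME1000_PAGE_SHIFT) | reg. Except on the 82574/82583, pages
 * >= 768, register 31, and page 0 register 25 are always accessed at
 * PHY address 1, and page 800 (BM_WUC_PAGE) is routed to the dedicated
 * wakeup-register helpers below.
 */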
   12362 
   12363 /*
   12364  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12365  *  @dev: pointer to the HW structure
   12366  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   12367  *
   12368  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   12369  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12370  */
   12371 static int
   12372 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12373 {
   12374 #ifdef WM_DEBUG
   12375 	struct wm_softc *sc = device_private(dev);
   12376 #endif
   12377 	uint16_t temp;
   12378 	int rv;
   12379 
   12380 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12381 		device_xname(dev), __func__));
   12382 
   12383 	if (!phy_regp)
   12384 		return -1;
   12385 
   12386 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12387 
   12388 	/* Select Port Control Registers page */
   12389 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12390 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12391 	if (rv != 0)
   12392 		return rv;
   12393 
   12394 	/* Read WUCE and save it */
   12395 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12396 	if (rv != 0)
   12397 		return rv;
   12398 
   12399 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12400 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12401 	 */
   12402 	temp = *phy_regp;
   12403 	temp |= BM_WUC_ENABLE_BIT;
   12404 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12405 
   12406 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12407 		return rv;
   12408 
   12409 	/* Select Host Wakeup Registers page - caller now able to write
   12410 	 * registers on the Wakeup registers page
   12411 	 */
   12412 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12413 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12414 }
   12415 
   12416 /*
   12417  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12418  *  @dev: pointer to the HW structure
   12419  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12420  *
   12421  *  Restore BM_WUC_ENABLE_REG to its original value.
   12422  *
   12423  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12424  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12425  *  caller.
   12426  */
   12427 static int
   12428 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12429 {
   12430 #ifdef WM_DEBUG
   12431 	struct wm_softc *sc = device_private(dev);
   12432 #endif
   12433 
   12434 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12435 		device_xname(dev), __func__));
   12436 
   12437 	if (!phy_regp)
   12438 		return -1;
   12439 
   12440 	/* Select Port Control Registers page */
   12441 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12442 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12443 
   12444 	/* Restore 769.17 to its original value */
   12445 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12446 
   12447 	return 0;
   12448 }
   12449 
   12450 /*
   12451  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   12452  *  @sc: pointer to the HW structure
   12453  *  @offset: register offset to be read or written
   12454  *  @val: pointer to the data to read or write
   12455  *  @rd: determines if operation is read or write
   12456  *  @page_set: BM_WUC_PAGE already set and access enabled
   12457  *
   12458  *  Read the PHY register at offset and store the retrieved information in
 *  data, or write data to the PHY register at offset.  Note that the
 *  procedure to access the PHY wakeup registers differs from the one for
 *  the other PHY registers.  It works as follows:
   12462  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 if acting as the manageability engine)
   12464  *  3) Write the address using the address opcode (0x11)
   12465  *  4) Read or write the data using the data opcode (0x12)
   12466  *  5) Restore 769.17.2 to its original value
   12467  *
   12468  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12469  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12470  *
   12471  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12472  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calling wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12474  */
   12475 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12477     bool page_set)
   12478 {
   12479 	struct wm_softc *sc = device_private(dev);
   12480 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12481 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12482 	uint16_t wuce;
   12483 	int rv = 0;
   12484 
   12485 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12486 		device_xname(dev), __func__));
   12487 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12488 	if ((sc->sc_type == WM_T_PCH)
   12489 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12490 		device_printf(dev,
   12491 		    "Attempting to access page %d while gig enabled.\n", page);
   12492 	}
   12493 
   12494 	if (!page_set) {
   12495 		/* Enable access to PHY wakeup registers */
   12496 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12497 		if (rv != 0) {
   12498 			device_printf(dev,
   12499 			    "%s: Could not enable PHY wakeup reg access\n",
   12500 			    __func__);
   12501 			return rv;
   12502 		}
   12503 	}
   12504 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12505 		device_xname(sc->sc_dev), __func__, page, regnum));
   12506 
	/*
	 * 3) and 4) Access the PHY wakeup register: write the register
	 * address with the address opcode, then read or write the data
	 * with the data opcode (see the description above).
	 */
   12511 
   12512 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12513 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12514 	if (rv != 0)
   12515 		return rv;
   12516 
   12517 	if (rd) {
   12518 		/* Read the Wakeup register page value using opcode 0x12 */
   12519 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12520 	} else {
   12521 		/* Write the Wakeup register page value using opcode 0x12 */
   12522 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12523 	}
   12524 	if (rv != 0)
   12525 		return rv;
   12526 
   12527 	if (!page_set)
   12528 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12529 
   12530 	return rv;
   12531 }
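
/*
 * Illustrative sketch (not compiled): batching several wakeup-register
 * accesses with page_set == true, so the enable/disable pair above runs
 * only once.  The two register offsets and this helper are hypothetical;
 * the (page << 5) | reg packing is assumed from the BM_PHY_REG_PAGE()/
 * BM_PHY_REG_NUM() decomposition used above, and the PHY semaphore is
 * assumed to be held, as both helpers require.
 */
#if 0
static int
wm_example_read_two_wuc_regs(device_t dev, uint16_t *a, uint16_t *b)
{
	uint16_t wuce;
	int rv;

	/* Steps 1 and 2: save WUCE, enable access, select BM_WUC_PAGE. */
	if ((rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce)) != 0)
		return rv;

	/* Steps 3 and 4, twice; page_set == true skips the re-enable. */
	rv = wm_access_phy_wakeup_reg_bm(dev, (BM_WUC_PAGE << 5) | 1,
	    a, true, true);
	if (rv == 0)
		rv = wm_access_phy_wakeup_reg_bm(dev, (BM_WUC_PAGE << 5) | 2,
		    b, true, true);

	/* Step 5: restore the original 769.17 value. */
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	return rv;
}
#endif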
   12532 
   12533 /*
   12534  * wm_gmii_hv_readreg:	[mii interface function]
   12535  *
 *	Read a PHY register on an HV (PCH and newer) PHY.
   12537  * This could be handled by the PHY layer if we didn't have to lock the
   12538  * resource ...
   12539  */
   12540 static int
   12541 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12542 {
   12543 	struct wm_softc *sc = device_private(dev);
   12544 	int rv;
   12545 
   12546 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12547 		device_xname(dev), __func__));
   12548 
   12549 	rv = sc->phy.acquire(sc);
   12550 	if (rv != 0) {
   12551 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12552 		return rv;
   12553 	}
   12554 
   12555 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12556 	sc->phy.release(sc);
   12557 	return rv;
   12558 }
   12559 
   12560 static int
   12561 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12562 {
   12563 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12564 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12565 	int rv;
   12566 
   12567 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12568 
   12569 	/* Page 800 works differently than the rest so it has its own func */
   12570 	if (page == BM_WUC_PAGE)
   12571 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12572 
   12573 	/*
   12574 	 * Lower than page 768 works differently than the rest so it has its
   12575 	 * own func
   12576 	 */
   12577 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: page %d is not supported\n",
		    __func__, page);
   12579 		return -1;
   12580 	}
   12581 
   12582 	/*
   12583 	 * XXX I21[789] documents say that the SMBus Address register is at
   12584 	 * PHY address 01, Page 0 (not 768), Register 26.
   12585 	 */
   12586 	if (page == HV_INTC_FC_PAGE_START)
   12587 		page = 0;
   12588 
   12589 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12590 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12591 		    page << BME1000_PAGE_SHIFT);
   12592 		if (rv != 0)
   12593 			return rv;
   12594 	}
   12595 
   12596 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12597 }
   12598 
   12599 /*
   12600  * wm_gmii_hv_writereg:	[mii interface function]
   12601  *
 *	Write a PHY register on an HV (PCH and newer) PHY.
   12603  * This could be handled by the PHY layer if we didn't have to lock the
   12604  * resource ...
   12605  */
   12606 static int
   12607 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12608 {
   12609 	struct wm_softc *sc = device_private(dev);
   12610 	int rv;
   12611 
   12612 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12613 		device_xname(dev), __func__));
   12614 
   12615 	rv = sc->phy.acquire(sc);
   12616 	if (rv != 0) {
   12617 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12618 		return rv;
   12619 	}
   12620 
   12621 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12622 	sc->phy.release(sc);
   12623 
   12624 	return rv;
   12625 }
   12626 
   12627 static int
   12628 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12629 {
   12630 	struct wm_softc *sc = device_private(dev);
   12631 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12632 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12633 	int rv;
   12634 
   12635 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12636 
   12637 	/* Page 800 works differently than the rest so it has its own func */
   12638 	if (page == BM_WUC_PAGE)
   12639 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12640 		    false);
   12641 
   12642 	/*
   12643 	 * Lower than page 768 works differently than the rest so it has its
   12644 	 * own func
   12645 	 */
   12646 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: page %d is not supported\n",
		    __func__, page);
   12648 		return -1;
   12649 	}
   12650 
	/*
	 * XXX I21[789] documents say that the SMBus Address register is at
	 * PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Workaround MDIO accesses being disabled after entering IEEE
	 * Power Down (whenever bit 11 of the PHY control register is set)
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0))
			device_printf(dev, "XXX need workaround\n");
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   12682 
   12683 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12684 }
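
/*
 * Illustrative sketch (not compiled): how a caller names an HV PHY
 * register.  The page and the register number travel packed in the single
 * "reg" argument; the packing below assumes the (page << 5) | reg layout
 * implied by the MII_ADDRMASK (0x1f) masking above.
 */
#if 0
	uint16_t smb_addr;

	/*
	 * The SMBus Address register: page 768, register 26 (see the
	 * I21[789] note above; the locked helper remaps page 768 to 0
	 * and forces PHY address 1).
	 */
	(void)wm_gmii_hv_readreg(dev, 1,
	    (HV_INTC_FC_PAGE_START << 5) | 26, &smb_addr);
#endif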
   12685 
   12686 /*
   12687  * wm_gmii_82580_readreg:	[mii interface function]
   12688  *
   12689  *	Read a PHY register on the 82580 and I350.
   12690  * This could be handled by the PHY layer if we didn't have to lock the
   12691  * resource ...
   12692  */
   12693 static int
   12694 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12695 {
   12696 	struct wm_softc *sc = device_private(dev);
   12697 	int rv;
   12698 
   12699 	rv = sc->phy.acquire(sc);
   12700 	if (rv != 0) {
   12701 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12702 		return rv;
   12703 	}
   12704 
   12705 #ifdef DIAGNOSTIC
   12706 	if (reg > MII_ADDRMASK) {
   12707 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12708 		    __func__, sc->sc_phytype, reg);
   12709 		reg &= MII_ADDRMASK;
   12710 	}
   12711 #endif
   12712 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12713 
   12714 	sc->phy.release(sc);
   12715 	return rv;
   12716 }
   12717 
   12718 /*
   12719  * wm_gmii_82580_writereg:	[mii interface function]
   12720  *
   12721  *	Write a PHY register on the 82580 and I350.
   12722  * This could be handled by the PHY layer if we didn't have to lock the
   12723  * resource ...
   12724  */
   12725 static int
   12726 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12727 {
   12728 	struct wm_softc *sc = device_private(dev);
   12729 	int rv;
   12730 
   12731 	rv = sc->phy.acquire(sc);
   12732 	if (rv != 0) {
   12733 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12734 		return rv;
   12735 	}
   12736 
   12737 #ifdef DIAGNOSTIC
   12738 	if (reg > MII_ADDRMASK) {
   12739 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12740 		    __func__, sc->sc_phytype, reg);
   12741 		reg &= MII_ADDRMASK;
   12742 	}
   12743 #endif
   12744 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12745 
   12746 	sc->phy.release(sc);
   12747 	return rv;
   12748 }
   12749 
   12750 /*
   12751  * wm_gmii_gs40g_readreg:	[mii interface function]
   12752  *
 *	Read a PHY register on the I210 and I211.
   12754  * This could be handled by the PHY layer if we didn't have to lock the
   12755  * resource ...
   12756  */
   12757 static int
   12758 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12759 {
   12760 	struct wm_softc *sc = device_private(dev);
   12761 	int page, offset;
   12762 	int rv;
   12763 
   12764 	/* Acquire semaphore */
   12765 	rv = sc->phy.acquire(sc);
   12766 	if (rv != 0) {
   12767 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12768 		return rv;
   12769 	}
   12770 
   12771 	/* Page select */
   12772 	page = reg >> GS40G_PAGE_SHIFT;
   12773 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12774 	if (rv != 0)
   12775 		goto release;
   12776 
   12777 	/* Read reg */
   12778 	offset = reg & GS40G_OFFSET_MASK;
   12779 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12780 
   12781 release:
   12782 	sc->phy.release(sc);
   12783 	return rv;
   12784 }
   12785 
   12786 /*
   12787  * wm_gmii_gs40g_writereg:	[mii interface function]
   12788  *
   12789  *	Write a PHY register on the I210 and I211.
   12790  * This could be handled by the PHY layer if we didn't have to lock the
   12791  * resource ...
   12792  */
   12793 static int
   12794 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12795 {
   12796 	struct wm_softc *sc = device_private(dev);
   12797 	uint16_t page;
   12798 	int offset, rv;
   12799 
   12800 	/* Acquire semaphore */
   12801 	rv = sc->phy.acquire(sc);
   12802 	if (rv != 0) {
   12803 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12804 		return rv;
   12805 	}
   12806 
   12807 	/* Page select */
   12808 	page = reg >> GS40G_PAGE_SHIFT;
   12809 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12810 	if (rv != 0)
   12811 		goto release;
   12812 
   12813 	/* Write reg */
   12814 	offset = reg & GS40G_OFFSET_MASK;
   12815 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12816 
   12817 release:
   12818 	/* Release semaphore */
   12819 	sc->phy.release(sc);
   12820 	return rv;
   12821 }
   12822 
   12823 /*
   12824  * wm_gmii_statchg:	[mii interface function]
   12825  *
   12826  *	Callback from MII layer when media changes.
   12827  */
   12828 static void
   12829 wm_gmii_statchg(struct ifnet *ifp)
   12830 {
   12831 	struct wm_softc *sc = ifp->if_softc;
   12832 	struct mii_data *mii = &sc->sc_mii;
   12833 
   12834 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12835 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12836 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12837 
   12838 	/* Get flow control negotiation result. */
   12839 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12840 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12841 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12842 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12843 	}
   12844 
   12845 	if (sc->sc_flowflags & IFM_FLOW) {
   12846 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12847 			sc->sc_ctrl |= CTRL_TFCE;
   12848 			sc->sc_fcrtl |= FCRTL_XONE;
   12849 		}
   12850 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12851 			sc->sc_ctrl |= CTRL_RFCE;
   12852 	}
   12853 
   12854 	if (mii->mii_media_active & IFM_FDX) {
   12855 		DPRINTF(sc, WM_DEBUG_LINK,
   12856 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12857 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12858 	} else {
   12859 		DPRINTF(sc, WM_DEBUG_LINK,
   12860 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12861 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12862 	}
   12863 
   12864 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12865 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12866 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12867 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12868 	if (sc->sc_type == WM_T_80003) {
   12869 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12870 		case IFM_1000_T:
   12871 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12872 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12874 			break;
   12875 		default:
   12876 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12877 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12879 			break;
   12880 		}
   12881 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12882 	}
   12883 }
   12884 
   12885 /* kumeran related (80003, ICH* and PCH*) */
   12886 
   12887 /*
   12888  * wm_kmrn_readreg:
   12889  *
   12890  *	Read a kumeran register
   12891  */
   12892 static int
   12893 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12894 {
   12895 	int rv;
   12896 
   12897 	if (sc->sc_type == WM_T_80003)
   12898 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12899 	else
   12900 		rv = sc->phy.acquire(sc);
   12901 	if (rv != 0) {
   12902 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12903 		    __func__);
   12904 		return rv;
   12905 	}
   12906 
   12907 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12908 
   12909 	if (sc->sc_type == WM_T_80003)
   12910 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12911 	else
   12912 		sc->phy.release(sc);
   12913 
   12914 	return rv;
   12915 }
   12916 
   12917 static int
   12918 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12919 {
   12920 
   12921 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12922 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12923 	    KUMCTRLSTA_REN);
   12924 	CSR_WRITE_FLUSH(sc);
   12925 	delay(2);
   12926 
   12927 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12928 
   12929 	return 0;
   12930 }
   12931 
   12932 /*
   12933  * wm_kmrn_writereg:
   12934  *
   12935  *	Write a kumeran register
   12936  */
   12937 static int
   12938 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12939 {
   12940 	int rv;
   12941 
   12942 	if (sc->sc_type == WM_T_80003)
   12943 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12944 	else
   12945 		rv = sc->phy.acquire(sc);
   12946 	if (rv != 0) {
   12947 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12948 		    __func__);
   12949 		return rv;
   12950 	}
   12951 
   12952 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12953 
   12954 	if (sc->sc_type == WM_T_80003)
   12955 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12956 	else
   12957 		sc->phy.release(sc);
   12958 
   12959 	return rv;
   12960 }
   12961 
   12962 static int
   12963 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12964 {
   12965 
   12966 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12967 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12968 
   12969 	return 0;
   12970 }
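
/*
 * Illustrative sketch (not compiled): a read-modify-write of one kumeran
 * register through the wrappers above, using only names that already
 * appear in this file.  KUMCTRLSTA multiplexes the offset and the data,
 * so each call is a single register access, with REN selecting a read.
 */
#if 0
	uint16_t hd_ctrl;
	int rv;

	rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd_ctrl);
	if (rv == 0)
		rv = wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif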
   12971 
   12972 /*
 * EMI register related (82579, WMPHY_I217 (PCH2 and newer)).
   12974  * This access method is different from IEEE MMD.
   12975  */
   12976 static int
   12977 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12978 {
   12979 	struct wm_softc *sc = device_private(dev);
   12980 	int rv;
   12981 
   12982 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12983 	if (rv != 0)
   12984 		return rv;
   12985 
   12986 	if (rd)
   12987 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12988 	else
   12989 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12990 	return rv;
   12991 }
   12992 
   12993 static int
   12994 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12995 {
   12996 
   12997 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12998 }
   12999 
   13000 static int
   13001 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13002 {
   13003 
   13004 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13005 }
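
/*
 * Illustrative sketch (not compiled): one logical EMI access costs two
 * MDIO operations, an address write and then a data transfer, both at PHY
 * address 2.  The EMI offset (0x10) and the bit below are made up; the
 * two wrappers above are real, and the PHY semaphore must be held.
 */
#if 0
	uint16_t data;
	int rv;

	rv = wm_read_emi_reg_locked(dev, 0x10, &data);	/* hypothetical reg */
	if (rv == 0) {
		data |= __BIT(0);			/* hypothetical bit */
		rv = wm_write_emi_reg_locked(dev, 0x10, data);
	}
#endif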
   13006 
   13007 /* SGMII related */
   13008 
   13009 /*
   13010  * wm_sgmii_uses_mdio
   13011  *
   13012  * Check whether the transaction is to the internal PHY or the external
   13013  * MDIO interface. Return true if it's MDIO.
   13014  */
   13015 static bool
   13016 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13017 {
   13018 	uint32_t reg;
   13019 	bool ismdio = false;
   13020 
   13021 	switch (sc->sc_type) {
   13022 	case WM_T_82575:
   13023 	case WM_T_82576:
   13024 		reg = CSR_READ(sc, WMREG_MDIC);
   13025 		ismdio = ((reg & MDIC_DEST) != 0);
   13026 		break;
   13027 	case WM_T_82580:
   13028 	case WM_T_I350:
   13029 	case WM_T_I354:
   13030 	case WM_T_I210:
   13031 	case WM_T_I211:
   13032 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13033 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13034 		break;
   13035 	default:
   13036 		break;
   13037 	}
   13038 
   13039 	return ismdio;
   13040 }
   13041 
/* Set up the internal SGMII PHY for an SFP module */
   13043 static void
   13044 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13045 {
   13046 	uint16_t id1, id2, phyreg;
   13047 	int i, rv;
   13048 
   13049 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13050 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13051 		return;
   13052 
   13053 	for (i = 0; i < MII_NPHY; i++) {
   13054 		sc->phy.no_errprint = true;
   13055 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13056 		if (rv != 0)
   13057 			continue;
   13058 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13059 		if (rv != 0)
   13060 			continue;
   13061 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13062 			continue;
   13063 		sc->phy.no_errprint = false;
   13064 
   13065 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13066 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13067 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13068 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13069 		break;
	}
}
   13073 
   13074 /*
   13075  * wm_sgmii_readreg:	[mii interface function]
   13076  *
   13077  *	Read a PHY register on the SGMII
   13078  * This could be handled by the PHY layer if we didn't have to lock the
   13079  * resource ...
   13080  */
   13081 static int
   13082 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13083 {
   13084 	struct wm_softc *sc = device_private(dev);
   13085 	int rv;
   13086 
   13087 	rv = sc->phy.acquire(sc);
   13088 	if (rv != 0) {
   13089 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13090 		return rv;
   13091 	}
   13092 
   13093 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13094 
   13095 	sc->phy.release(sc);
   13096 	return rv;
   13097 }
   13098 
   13099 static int
   13100 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13101 {
   13102 	struct wm_softc *sc = device_private(dev);
   13103 	uint32_t i2ccmd;
   13104 	int i, rv = 0;
   13105 
   13106 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13107 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13108 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13109 
   13110 	/* Poll the ready bit */
   13111 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13112 		delay(50);
   13113 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13114 		if (i2ccmd & I2CCMD_READY)
   13115 			break;
   13116 	}
   13117 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13118 		device_printf(dev, "I2CCMD Read did not complete\n");
   13119 		rv = ETIMEDOUT;
   13120 	}
   13121 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13122 		if (!sc->phy.no_errprint)
   13123 			device_printf(dev, "I2CCMD Error bit set\n");
   13124 		rv = EIO;
   13125 	}
   13126 
   13127 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13128 
   13129 	return rv;
   13130 }
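
/*
 * Worked example for the byte swap above: the I2CCMD data field carries
 * the 16-bit register value in I2C (big-endian) byte order, so a PHY
 * register value of 0x1234 arrives as 0x3412 in I2CCMD[15:0].
 */
#if 0
	uint32_t i2ccmd = 0x3412;
	uint16_t v;

	v = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
	/* v == 0x1234; wm_sgmii_writereg_locked() swaps the other way. */
#endif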
   13131 
   13132 /*
   13133  * wm_sgmii_writereg:	[mii interface function]
   13134  *
   13135  *	Write a PHY register on the SGMII.
   13136  * This could be handled by the PHY layer if we didn't have to lock the
   13137  * resource ...
   13138  */
   13139 static int
   13140 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13141 {
   13142 	struct wm_softc *sc = device_private(dev);
   13143 	int rv;
   13144 
   13145 	rv = sc->phy.acquire(sc);
   13146 	if (rv != 0) {
   13147 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13148 		return rv;
   13149 	}
   13150 
   13151 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13152 
   13153 	sc->phy.release(sc);
   13154 
   13155 	return rv;
   13156 }
   13157 
   13158 static int
   13159 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13160 {
   13161 	struct wm_softc *sc = device_private(dev);
   13162 	uint32_t i2ccmd;
   13163 	uint16_t swapdata;
   13164 	int rv = 0;
   13165 	int i;
   13166 
   13167 	/* Swap the data bytes for the I2C interface */
   13168 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13169 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13170 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13171 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13172 
   13173 	/* Poll the ready bit */
   13174 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13175 		delay(50);
   13176 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13177 		if (i2ccmd & I2CCMD_READY)
   13178 			break;
   13179 	}
   13180 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13181 		device_printf(dev, "I2CCMD Write did not complete\n");
   13182 		rv = ETIMEDOUT;
   13183 	}
   13184 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13185 		device_printf(dev, "I2CCMD Error bit set\n");
   13186 		rv = EIO;
   13187 	}
   13188 
   13189 	return rv;
   13190 }
   13191 
   13192 /* TBI related */
   13193 
   13194 static bool
   13195 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13196 {
   13197 	bool sig;
   13198 
   13199 	sig = ctrl & CTRL_SWDPIN(1);
   13200 
   13201 	/*
   13202 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13203 	 * detect a signal, 1 if they don't.
   13204 	 */
   13205 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13206 		sig = !sig;
   13207 
   13208 	return sig;
   13209 }
   13210 
   13211 /*
   13212  * wm_tbi_mediainit:
   13213  *
   13214  *	Initialize media for use on 1000BASE-X devices.
   13215  */
   13216 static void
   13217 wm_tbi_mediainit(struct wm_softc *sc)
   13218 {
   13219 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13220 	const char *sep = "";
   13221 
   13222 	if (sc->sc_type < WM_T_82543)
   13223 		sc->sc_tipg = TIPG_WM_DFLT;
   13224 	else
   13225 		sc->sc_tipg = TIPG_LG_DFLT;
   13226 
   13227 	sc->sc_tbi_serdes_anegticks = 5;
   13228 
   13229 	/* Initialize our media structures */
   13230 	sc->sc_mii.mii_ifp = ifp;
   13231 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13232 
   13233 	ifp->if_baudrate = IF_Gbps(1);
   13234 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13235 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13236 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13237 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13238 		    sc->sc_core_lock);
   13239 	} else {
   13240 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13241 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13242 	}
   13243 
   13244 	/*
   13245 	 * SWD Pins:
   13246 	 *
   13247 	 *	0 = Link LED (output)
   13248 	 *	1 = Loss Of Signal (input)
   13249 	 */
   13250 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13251 
   13252 	/* XXX Perhaps this is only for TBI */
   13253 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13254 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13255 
   13256 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13257 		sc->sc_ctrl &= ~CTRL_LRST;
   13258 
   13259 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13260 
   13261 #define	ADD(ss, mm, dd)							  \
   13262 do {									  \
   13263 	aprint_normal("%s%s", sep, ss);					  \
   13264 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13265 	sep = ", ";							  \
   13266 } while (/*CONSTCOND*/0)
   13267 
   13268 	aprint_normal_dev(sc->sc_dev, "");
   13269 
   13270 	if (sc->sc_type == WM_T_I354) {
   13271 		uint32_t status;
   13272 
   13273 		status = CSR_READ(sc, WMREG_STATUS);
   13274 		if (((status & STATUS_2P5_SKU) != 0)
   13275 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13276 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   13277 		} else
   13278 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   13279 	} else if (sc->sc_type == WM_T_82545) {
   13280 		/* Only 82545 is LX (XXX except SFP) */
   13281 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13282 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13283 	} else if (sc->sc_sfptype != 0) {
   13284 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13285 		switch (sc->sc_sfptype) {
   13286 		default:
   13287 		case SFF_SFP_ETH_FLAGS_1000SX:
   13288 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13289 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13290 			break;
   13291 		case SFF_SFP_ETH_FLAGS_1000LX:
   13292 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13293 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13294 			break;
   13295 		case SFF_SFP_ETH_FLAGS_1000CX:
   13296 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13297 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13298 			break;
   13299 		case SFF_SFP_ETH_FLAGS_1000T:
   13300 			ADD("1000baseT", IFM_1000_T, 0);
   13301 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13302 			break;
   13303 		case SFF_SFP_ETH_FLAGS_100FX:
   13304 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13305 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13306 			break;
   13307 		}
   13308 	} else {
   13309 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13310 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13311 	}
   13312 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13313 	aprint_normal("\n");
   13314 
   13315 #undef ADD
   13316 
   13317 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13318 }
   13319 
   13320 /*
   13321  * wm_tbi_mediachange:	[ifmedia interface function]
   13322  *
   13323  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13324  */
   13325 static int
   13326 wm_tbi_mediachange(struct ifnet *ifp)
   13327 {
   13328 	struct wm_softc *sc = ifp->if_softc;
   13329 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13330 	uint32_t status, ctrl;
   13331 	bool signal;
   13332 	int i;
   13333 
   13334 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13335 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13336 		/* XXX need some work for >= 82571 and < 82575 */
   13337 		if (sc->sc_type < WM_T_82575)
   13338 			return 0;
   13339 	}
   13340 
   13341 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13342 	    || (sc->sc_type >= WM_T_82575))
   13343 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13344 
   13345 	sc->sc_ctrl &= ~CTRL_LRST;
   13346 	sc->sc_txcw = TXCW_ANE;
   13347 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13348 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13349 	else if (ife->ifm_media & IFM_FDX)
   13350 		sc->sc_txcw |= TXCW_FD;
   13351 	else
   13352 		sc->sc_txcw |= TXCW_HD;
   13353 
   13354 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13355 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13356 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13358 		device_xname(sc->sc_dev), sc->sc_txcw));
   13359 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13360 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13361 	CSR_WRITE_FLUSH(sc);
   13362 	delay(1000);
   13363 
   13364 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13365 	signal = wm_tbi_havesignal(sc, ctrl);
   13366 
   13367 	DPRINTF(sc, WM_DEBUG_LINK,
   13368 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13369 
   13370 	if (signal) {
   13371 		/* Have signal; wait for the link to come up. */
   13372 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13373 			delay(10000);
   13374 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13375 				break;
   13376 		}
   13377 
   13378 		DPRINTF(sc, WM_DEBUG_LINK,
   13379 		    ("%s: i = %d after waiting for link\n",
   13380 			device_xname(sc->sc_dev), i));
   13381 
   13382 		status = CSR_READ(sc, WMREG_STATUS);
   13383 		DPRINTF(sc, WM_DEBUG_LINK,
   13384 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13385 			__PRIxBIT "\n",
   13386 			device_xname(sc->sc_dev), status, STATUS_LU));
   13387 		if (status & STATUS_LU) {
   13388 			/* Link is up. */
   13389 			DPRINTF(sc, WM_DEBUG_LINK,
   13390 			    ("%s: LINK: set media -> link up %s\n",
   13391 				device_xname(sc->sc_dev),
   13392 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13393 
   13394 			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so re-read CTRL into sc->sc_ctrl.
   13397 			 */
   13398 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13399 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13400 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13401 			if (status & STATUS_FD)
   13402 				sc->sc_tctl |=
   13403 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13404 			else
   13405 				sc->sc_tctl |=
   13406 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13407 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13408 				sc->sc_fcrtl |= FCRTL_XONE;
   13409 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13410 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13411 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13412 			sc->sc_tbi_linkup = 1;
   13413 		} else {
   13414 			if (i == WM_LINKUP_TIMEOUT)
   13415 				wm_check_for_link(sc);
   13416 			/* Link is down. */
   13417 			DPRINTF(sc, WM_DEBUG_LINK,
   13418 			    ("%s: LINK: set media -> link down\n",
   13419 				device_xname(sc->sc_dev)));
   13420 			sc->sc_tbi_linkup = 0;
   13421 		}
   13422 	} else {
   13423 		DPRINTF(sc, WM_DEBUG_LINK,
   13424 		    ("%s: LINK: set media -> no signal\n",
   13425 			device_xname(sc->sc_dev)));
   13426 		sc->sc_tbi_linkup = 0;
   13427 	}
   13428 
   13429 	wm_tbi_serdes_set_linkled(sc);
   13430 
   13431 	return 0;
   13432 }
   13433 
   13434 /*
   13435  * wm_tbi_mediastatus:	[ifmedia interface function]
   13436  *
   13437  *	Get the current interface media status on a 1000BASE-X device.
   13438  */
   13439 static void
   13440 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13441 {
   13442 	struct wm_softc *sc = ifp->if_softc;
   13443 	uint32_t ctrl, status;
   13444 
   13445 	ifmr->ifm_status = IFM_AVALID;
   13446 	ifmr->ifm_active = IFM_ETHER;
   13447 
   13448 	status = CSR_READ(sc, WMREG_STATUS);
   13449 	if ((status & STATUS_LU) == 0) {
   13450 		ifmr->ifm_active |= IFM_NONE;
   13451 		return;
   13452 	}
   13453 
   13454 	ifmr->ifm_status |= IFM_ACTIVE;
   13455 	/* Only 82545 is LX */
   13456 	if (sc->sc_type == WM_T_82545)
   13457 		ifmr->ifm_active |= IFM_1000_LX;
   13458 	else
   13459 		ifmr->ifm_active |= IFM_1000_SX;
   13460 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13461 		ifmr->ifm_active |= IFM_FDX;
   13462 	else
   13463 		ifmr->ifm_active |= IFM_HDX;
   13464 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13465 	if (ctrl & CTRL_RFCE)
   13466 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13467 	if (ctrl & CTRL_TFCE)
   13468 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13469 }
   13470 
   13471 /* XXX TBI only */
   13472 static int
   13473 wm_check_for_link(struct wm_softc *sc)
   13474 {
   13475 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13476 	uint32_t rxcw;
   13477 	uint32_t ctrl;
   13478 	uint32_t status;
   13479 	bool signal;
   13480 
   13481 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13482 		device_xname(sc->sc_dev), __func__));
   13483 
   13484 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13485 		/* XXX need some work for >= 82571 */
   13486 		if (sc->sc_type >= WM_T_82571) {
   13487 			sc->sc_tbi_linkup = 1;
   13488 			return 0;
   13489 		}
   13490 	}
   13491 
   13492 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13493 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13494 	status = CSR_READ(sc, WMREG_STATUS);
   13495 	signal = wm_tbi_havesignal(sc, ctrl);
   13496 
   13497 	DPRINTF(sc, WM_DEBUG_LINK,
   13498 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13499 		device_xname(sc->sc_dev), __func__, signal,
   13500 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13501 
   13502 	/*
   13503 	 * SWDPIN   LU RXCW
   13504 	 *	0    0	  0
   13505 	 *	0    0	  1	(should not happen)
   13506 	 *	0    1	  0	(should not happen)
   13507 	 *	0    1	  1	(should not happen)
   13508 	 *	1    0	  0	Disable autonego and force linkup
   13509 	 *	1    0	  1	got /C/ but not linkup yet
   13510 	 *	1    1	  0	(linkup)
   13511 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13512 	 *
   13513 	 */
   13514 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13515 		DPRINTF(sc, WM_DEBUG_LINK,
   13516 		    ("%s: %s: force linkup and fullduplex\n",
   13517 			device_xname(sc->sc_dev), __func__));
   13518 		sc->sc_tbi_linkup = 0;
   13519 		/* Disable auto-negotiation in the TXCW register */
   13520 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13521 
   13522 		/*
   13523 		 * Force link-up and also force full-duplex.
   13524 		 *
		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
		 * automatically, so update sc->sc_ctrl from it.
   13527 		 */
   13528 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13529 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13530 	} else if (((status & STATUS_LU) != 0)
   13531 	    && ((rxcw & RXCW_C) != 0)
   13532 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13533 		sc->sc_tbi_linkup = 1;
   13534 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13535 			device_xname(sc->sc_dev), __func__));
   13536 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13537 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13538 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13540 			device_xname(sc->sc_dev), __func__));
   13541 	} else {
   13542 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13543 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13544 			status));
   13545 	}
   13546 
   13547 	return 0;
   13548 }
   13549 
   13550 /*
   13551  * wm_tbi_tick:
   13552  *
   13553  *	Check the link on TBI devices.
   13554  *	This function acts as mii_tick().
   13555  */
   13556 static void
   13557 wm_tbi_tick(struct wm_softc *sc)
   13558 {
   13559 	struct mii_data *mii = &sc->sc_mii;
   13560 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13561 	uint32_t status;
   13562 
   13563 	KASSERT(mutex_owned(sc->sc_core_lock));
   13564 
   13565 	status = CSR_READ(sc, WMREG_STATUS);
   13566 
   13567 	/* XXX is this needed? */
   13568 	(void)CSR_READ(sc, WMREG_RXCW);
   13569 	(void)CSR_READ(sc, WMREG_CTRL);
   13570 
   13571 	/* set link status */
   13572 	if ((status & STATUS_LU) == 0) {
   13573 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13574 			device_xname(sc->sc_dev)));
   13575 		sc->sc_tbi_linkup = 0;
   13576 	} else if (sc->sc_tbi_linkup == 0) {
   13577 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13578 			device_xname(sc->sc_dev),
   13579 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13580 		sc->sc_tbi_linkup = 1;
   13581 		sc->sc_tbi_serdes_ticks = 0;
   13582 	}
   13583 
   13584 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13585 		goto setled;
   13586 
   13587 	if ((status & STATUS_LU) == 0) {
   13588 		sc->sc_tbi_linkup = 0;
   13589 		/* If the timer expired, retry autonegotiation */
   13590 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13591 		    && (++sc->sc_tbi_serdes_ticks
   13592 			>= sc->sc_tbi_serdes_anegticks)) {
   13593 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13594 				device_xname(sc->sc_dev), __func__));
   13595 			sc->sc_tbi_serdes_ticks = 0;
   13596 			/*
   13597 			 * Reset the link, and let autonegotiation do
   13598 			 * its thing
   13599 			 */
   13600 			sc->sc_ctrl |= CTRL_LRST;
   13601 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13602 			CSR_WRITE_FLUSH(sc);
   13603 			delay(1000);
   13604 			sc->sc_ctrl &= ~CTRL_LRST;
   13605 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13606 			CSR_WRITE_FLUSH(sc);
   13607 			delay(1000);
   13608 			CSR_WRITE(sc, WMREG_TXCW,
   13609 			    sc->sc_txcw & ~TXCW_ANE);
   13610 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13611 		}
   13612 	}
   13613 
   13614 setled:
   13615 	wm_tbi_serdes_set_linkled(sc);
   13616 }
   13617 
   13618 /* SERDES related */
   13619 static void
   13620 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13621 {
   13622 	uint32_t reg;
   13623 
   13624 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13625 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13626 		return;
   13627 
   13628 	/* Enable PCS to turn on link */
   13629 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13630 	reg |= PCS_CFG_PCS_EN;
   13631 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13632 
   13633 	/* Power up the laser */
   13634 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13635 	reg &= ~CTRL_EXT_SWDPIN(3);
   13636 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13637 
   13638 	/* Flush the write to verify completion */
   13639 	CSR_WRITE_FLUSH(sc);
   13640 	delay(1000);
   13641 }
   13642 
   13643 static int
   13644 wm_serdes_mediachange(struct ifnet *ifp)
   13645 {
   13646 	struct wm_softc *sc = ifp->if_softc;
   13647 	bool pcs_autoneg = true; /* XXX */
   13648 	uint32_t ctrl_ext, pcs_lctl, reg;
   13649 
   13650 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13651 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13652 		return 0;
   13653 
   13654 	/* XXX Currently, this function is not called on 8257[12] */
   13655 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13656 	    || (sc->sc_type >= WM_T_82575))
   13657 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13658 
   13659 	/* Power on the sfp cage if present */
   13660 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13661 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13662 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13663 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13664 
   13665 	sc->sc_ctrl |= CTRL_SLU;
   13666 
   13667 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13668 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13669 
   13670 		reg = CSR_READ(sc, WMREG_CONNSW);
   13671 		reg |= CONNSW_ENRGSRC;
   13672 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13673 	}
   13674 
   13675 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13676 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13677 	case CTRL_EXT_LINK_MODE_SGMII:
   13678 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13679 		pcs_autoneg = true;
   13680 		/* Autoneg time out should be disabled for SGMII mode */
   13681 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13682 		break;
   13683 	case CTRL_EXT_LINK_MODE_1000KX:
   13684 		pcs_autoneg = false;
   13685 		/* FALLTHROUGH */
   13686 	default:
   13687 		if ((sc->sc_type == WM_T_82575)
   13688 		    || (sc->sc_type == WM_T_82576)) {
   13689 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13690 				pcs_autoneg = false;
   13691 		}
   13692 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13693 		    | CTRL_FRCFDX;
   13694 
   13695 		/* Set speed of 1000/Full if speed/duplex is forced */
   13696 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13697 	}
   13698 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13699 
   13700 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13701 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13702 
   13703 	if (pcs_autoneg) {
   13704 		/* Set PCS register for autoneg */
   13705 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13706 
   13707 		/* Disable force flow control for autoneg */
   13708 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13709 
   13710 		/* Configure flow control advertisement for autoneg */
   13711 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13712 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13713 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13714 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13715 	} else
   13716 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13717 
   13718 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13719 
   13720 	return 0;
   13721 }
   13722 
   13723 static void
   13724 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13725 {
   13726 	struct wm_softc *sc = ifp->if_softc;
   13727 	struct mii_data *mii = &sc->sc_mii;
   13728 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13729 	uint32_t pcs_adv, pcs_lpab, reg;
   13730 
   13731 	ifmr->ifm_status = IFM_AVALID;
   13732 	ifmr->ifm_active = IFM_ETHER;
   13733 
   13734 	/* Check PCS */
   13735 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13736 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13737 		ifmr->ifm_active |= IFM_NONE;
   13738 		sc->sc_tbi_linkup = 0;
   13739 		goto setled;
   13740 	}
   13741 
   13742 	sc->sc_tbi_linkup = 1;
   13743 	ifmr->ifm_status |= IFM_ACTIVE;
   13744 	if (sc->sc_type == WM_T_I354) {
   13745 		uint32_t status;
   13746 
   13747 		status = CSR_READ(sc, WMREG_STATUS);
   13748 		if (((status & STATUS_2P5_SKU) != 0)
   13749 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13750 			ifmr->ifm_active |= IFM_2500_KX;
   13751 		} else
   13752 			ifmr->ifm_active |= IFM_1000_KX;
   13753 	} else {
   13754 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13755 		case PCS_LSTS_SPEED_10:
   13756 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13757 			break;
   13758 		case PCS_LSTS_SPEED_100:
   13759 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13760 			break;
   13761 		case PCS_LSTS_SPEED_1000:
   13762 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13763 			break;
   13764 		default:
   13765 			device_printf(sc->sc_dev, "Unknown speed\n");
   13766 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13767 			break;
   13768 		}
   13769 	}
   13770 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13771 	if ((reg & PCS_LSTS_FDX) != 0)
   13772 		ifmr->ifm_active |= IFM_FDX;
   13773 	else
   13774 		ifmr->ifm_active |= IFM_HDX;
   13775 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13776 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13777 		/* Check flow */
   13778 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13779 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13780 			DPRINTF(sc, WM_DEBUG_LINK,
   13781 			    ("XXX LINKOK but not ACOMP\n"));
   13782 			goto setled;
   13783 		}
   13784 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13785 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13786 		DPRINTF(sc, WM_DEBUG_LINK,
   13787 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13788 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13789 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13790 			mii->mii_media_active |= IFM_FLOW
   13791 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13792 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13793 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13794 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13795 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13796 			mii->mii_media_active |= IFM_FLOW
   13797 			    | IFM_ETH_TXPAUSE;
   13798 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13799 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13800 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13801 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13802 			mii->mii_media_active |= IFM_FLOW
   13803 			    | IFM_ETH_RXPAUSE;
   13804 		}
   13805 	}
   13806 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13807 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13808 setled:
   13809 	wm_tbi_serdes_set_linkled(sc);
   13810 }
   13811 
   13812 /*
   13813  * wm_serdes_tick:
   13814  *
   13815  *	Check the link on serdes devices.
   13816  */
   13817 static void
   13818 wm_serdes_tick(struct wm_softc *sc)
   13819 {
   13820 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13821 	struct mii_data *mii = &sc->sc_mii;
   13822 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13823 	uint32_t reg;
   13824 
   13825 	KASSERT(mutex_owned(sc->sc_core_lock));
   13826 
   13827 	mii->mii_media_status = IFM_AVALID;
   13828 	mii->mii_media_active = IFM_ETHER;
   13829 
   13830 	/* Check PCS */
   13831 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13832 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13833 		mii->mii_media_status |= IFM_ACTIVE;
   13834 		sc->sc_tbi_linkup = 1;
   13835 		sc->sc_tbi_serdes_ticks = 0;
   13836 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13837 		if ((reg & PCS_LSTS_FDX) != 0)
   13838 			mii->mii_media_active |= IFM_FDX;
   13839 		else
   13840 			mii->mii_media_active |= IFM_HDX;
   13841 	} else {
		mii->mii_media_active |= IFM_NONE;
   13843 		sc->sc_tbi_linkup = 0;
   13844 		/* If the timer expired, retry autonegotiation */
   13845 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13846 		    && (++sc->sc_tbi_serdes_ticks
   13847 			>= sc->sc_tbi_serdes_anegticks)) {
   13848 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13849 				device_xname(sc->sc_dev), __func__));
   13850 			sc->sc_tbi_serdes_ticks = 0;
   13851 			/* XXX */
   13852 			wm_serdes_mediachange(ifp);
   13853 		}
   13854 	}
   13855 
   13856 	wm_tbi_serdes_set_linkled(sc);
   13857 }
   13858 
   13859 /* SFP related */
   13860 
   13861 static int
   13862 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13863 {
   13864 	uint32_t i2ccmd;
   13865 	int i;
   13866 
   13867 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13868 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13869 
   13870 	/* Poll the ready bit */
   13871 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13872 		delay(50);
   13873 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13874 		if (i2ccmd & I2CCMD_READY)
   13875 			break;
   13876 	}
   13877 	if ((i2ccmd & I2CCMD_READY) == 0)
   13878 		return -1;
   13879 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13880 		return -1;
   13881 
   13882 	*data = i2ccmd & 0x00ff;
   13883 
   13884 	return 0;
   13885 }
   13886 
   13887 static uint32_t
   13888 wm_sfp_get_media_type(struct wm_softc *sc)
   13889 {
   13890 	uint32_t ctrl_ext;
   13891 	uint8_t val = 0;
   13892 	int timeout = 3;
   13893 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13894 	int rv = -1;
   13895 
   13896 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13897 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13898 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13899 	CSR_WRITE_FLUSH(sc);
   13900 
   13901 	/* Read SFP module data */
   13902 	while (timeout) {
   13903 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13904 		if (rv == 0)
   13905 			break;
   13906 		delay(100*1000); /* XXX too big */
   13907 		timeout--;
   13908 	}
   13909 	if (rv != 0)
   13910 		goto out;
   13911 
   13912 	switch (val) {
   13913 	case SFF_SFP_ID_SFF:
   13914 		aprint_normal_dev(sc->sc_dev,
   13915 		    "Module/Connector soldered to board\n");
   13916 		break;
   13917 	case SFF_SFP_ID_SFP:
   13918 		sc->sc_flags |= WM_F_SFP;
   13919 		break;
   13920 	case SFF_SFP_ID_UNKNOWN:
   13921 		goto out;
   13922 	default:
   13923 		break;
   13924 	}
   13925 
   13926 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13927 	if (rv != 0)
   13928 		goto out;
   13929 
   13930 	sc->sc_sfptype = val;
   13931 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13932 		mediatype = WM_MEDIATYPE_SERDES;
   13933 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13934 		sc->sc_flags |= WM_F_SGMII;
   13935 		mediatype = WM_MEDIATYPE_COPPER;
   13936 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13937 		sc->sc_flags |= WM_F_SGMII;
   13938 		mediatype = WM_MEDIATYPE_SERDES;
   13939 	} else {
   13940 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13941 		    __func__, sc->sc_sfptype);
   13942 		sc->sc_sfptype = 0; /* XXX unknown */
   13943 	}
   13944 
   13945 out:
   13946 	/* Restore I2C interface setting */
   13947 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13948 
   13949 	return mediatype;
   13950 }
   13951 
   13952 /*
   13953  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
   13955  */
   13956 
   13957 /* Both spi and uwire */
   13958 
   13959 /*
   13960  * wm_eeprom_sendbits:
   13961  *
   13962  *	Send a series of bits to the EEPROM.
   13963  */
   13964 static void
   13965 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13966 {
   13967 	uint32_t reg;
   13968 	int x;
   13969 
   13970 	reg = CSR_READ(sc, WMREG_EECD);
   13971 
   13972 	for (x = nbits; x > 0; x--) {
   13973 		if (bits & (1U << (x - 1)))
   13974 			reg |= EECD_DI;
   13975 		else
   13976 			reg &= ~EECD_DI;
   13977 		CSR_WRITE(sc, WMREG_EECD, reg);
   13978 		CSR_WRITE_FLUSH(sc);
   13979 		delay(2);
   13980 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13981 		CSR_WRITE_FLUSH(sc);
   13982 		delay(2);
   13983 		CSR_WRITE(sc, WMREG_EECD, reg);
   13984 		CSR_WRITE_FLUSH(sc);
   13985 		delay(2);
   13986 	}
   13987 }
   13988 
   13989 /*
   13990  * wm_eeprom_recvbits:
   13991  *
   13992  *	Receive a series of bits from the EEPROM.
   13993  */
   13994 static void
   13995 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13996 {
   13997 	uint32_t reg, val;
   13998 	int x;
   13999 
   14000 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14001 
   14002 	val = 0;
   14003 	for (x = nbits; x > 0; x--) {
   14004 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14005 		CSR_WRITE_FLUSH(sc);
   14006 		delay(2);
   14007 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14008 			val |= (1U << (x - 1));
   14009 		CSR_WRITE(sc, WMREG_EECD, reg);
   14010 		CSR_WRITE_FLUSH(sc);
   14011 		delay(2);
   14012 	}
   14013 	*valp = val;
   14014 }
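
/*
 * Timing note for the two bit-bang helpers above: each transmitted bit is
 * three phases (set DI, raise SK, drop SK) and each received bit is two
 * (raise SK and sample DO, drop SK), with delay(2) after each flush.  That
 * keeps the serial clock well under 250 kHz, which both Microwire and SPI
 * EEPROM parts tolerate.
 */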
   14015 
   14016 /* Microwire */
   14017 
   14018 /*
   14019  * wm_nvm_read_uwire:
   14020  *
   14021  *	Read a word from the EEPROM using the MicroWire protocol.
   14022  */
   14023 static int
   14024 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14025 {
   14026 	uint32_t reg, val;
   14027 	int i, rv;
   14028 
   14029 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14030 		device_xname(sc->sc_dev), __func__));
   14031 
   14032 	rv = sc->nvm.acquire(sc);
   14033 	if (rv != 0)
   14034 		return rv;
   14035 
   14036 	for (i = 0; i < wordcnt; i++) {
   14037 		/* Clear SK and DI. */
   14038 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14039 		CSR_WRITE(sc, WMREG_EECD, reg);
   14040 
   14041 		/*
   14042 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   14043 		 * and Xen.
   14044 		 *
   14045 		 * We use this workaround only for 82540 because qemu's
   14046 		 * e1000 act as 82540.
   14047 		 */
   14048 		if (sc->sc_type == WM_T_82540) {
   14049 			reg |= EECD_SK;
   14050 			CSR_WRITE(sc, WMREG_EECD, reg);
   14051 			reg &= ~EECD_SK;
   14052 			CSR_WRITE(sc, WMREG_EECD, reg);
   14053 			CSR_WRITE_FLUSH(sc);
   14054 			delay(2);
   14055 		}
   14056 		/* XXX: end of workaround */
   14057 
   14058 		/* Set CHIP SELECT. */
   14059 		reg |= EECD_CS;
   14060 		CSR_WRITE(sc, WMREG_EECD, reg);
   14061 		CSR_WRITE_FLUSH(sc);
   14062 		delay(2);
   14063 
   14064 		/* Shift in the READ command. */
   14065 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14066 
   14067 		/* Shift in address. */
   14068 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14069 
   14070 		/* Shift out the data. */
   14071 		wm_eeprom_recvbits(sc, &val, 16);
   14072 		data[i] = val & 0xffff;
   14073 
   14074 		/* Clear CHIP SELECT. */
   14075 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14076 		CSR_WRITE(sc, WMREG_EECD, reg);
   14077 		CSR_WRITE_FLUSH(sc);
   14078 		delay(2);
   14079 	}
   14080 
   14081 	sc->nvm.release(sc);
   14082 	return 0;
   14083 }
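
/*
 * Illustrative sketch (not compiled): the three phases of one Microwire
 * READ transaction, exactly as driven by the loop above, assuming
 * UWIRE_OPC_READ is the standard 93Cxx read opcode (binary 110).  The CS
 * framing around them is what delimits the transaction.
 */
#if 0
	uint32_t val;

	/* ...EECD_CS raised... */
	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	   /* opcode */
	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits); /* address */
	wm_eeprom_recvbits(sc, &val, 16);		   /* one data word */
	/* ...EECD_CS dropped to end the transaction... */
#endif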
   14084 
   14085 /* SPI */
   14086 
   14087 /*
   14088  * Set SPI and FLASH related information from the EECD register.
   14089  * For 82541 and 82547, the word size is taken from EEPROM.
   14090  */
   14091 static int
   14092 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14093 {
   14094 	int size;
   14095 	uint32_t reg;
   14096 	uint16_t data;
   14097 
   14098 	reg = CSR_READ(sc, WMREG_EECD);
   14099 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14100 
   14101 	/* Read the size of NVM from EECD by default */
   14102 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14103 	switch (sc->sc_type) {
   14104 	case WM_T_82541:
   14105 	case WM_T_82541_2:
   14106 	case WM_T_82547:
   14107 	case WM_T_82547_2:
   14108 		/* Set dummy value to access EEPROM */
   14109 		sc->sc_nvm_wordsize = 64;
   14110 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   14111 			aprint_error_dev(sc->sc_dev,
   14112 			    "%s: failed to read EEPROM size\n", __func__);
   14113 		}
   14114 		reg = data;
   14115 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14116 		if (size == 0)
    14117 			size = 6; /* 64 words */
   14118 		else
   14119 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14120 		break;
   14121 	case WM_T_80003:
   14122 	case WM_T_82571:
   14123 	case WM_T_82572:
   14124 	case WM_T_82573: /* SPI case */
   14125 	case WM_T_82574: /* SPI case */
   14126 	case WM_T_82583: /* SPI case */
   14127 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14128 		if (size > 14)
   14129 			size = 14;
   14130 		break;
   14131 	case WM_T_82575:
   14132 	case WM_T_82576:
   14133 	case WM_T_82580:
   14134 	case WM_T_I350:
   14135 	case WM_T_I354:
   14136 	case WM_T_I210:
   14137 	case WM_T_I211:
   14138 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14139 		if (size > 15)
   14140 			size = 15;
   14141 		break;
   14142 	default:
    14143 		aprint_error_dev(sc->sc_dev,
    14144 		    "%s: unknown device (%d)\n", __func__, sc->sc_type);
    14145 		return -1;
   14147 	}
   14148 
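          	/* 'size' is log2 of the word count, e.g. size == 8 -> 256 words. */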
   14149 	sc->sc_nvm_wordsize = 1 << size;
   14150 
   14151 	return 0;
   14152 }
   14153 
   14154 /*
   14155  * wm_nvm_ready_spi:
   14156  *
   14157  *	Wait for a SPI EEPROM to be ready for commands.
   14158  */
   14159 static int
   14160 wm_nvm_ready_spi(struct wm_softc *sc)
   14161 {
   14162 	uint32_t val;
   14163 	int usec;
   14164 
   14165 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14166 		device_xname(sc->sc_dev), __func__));
   14167 
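          	/*
          	 * Poll the EEPROM's status register (RDSR) until the
          	 * ready/busy bit clears, for up to SPI_MAX_RETRIES
          	 * microseconds.
          	 */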
   14168 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14169 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14170 		wm_eeprom_recvbits(sc, &val, 8);
   14171 		if ((val & SPI_SR_RDY) == 0)
   14172 			break;
   14173 	}
   14174 	if (usec >= SPI_MAX_RETRIES) {
    14175 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   14176 		return -1;
   14177 	}
   14178 	return 0;
   14179 }
   14180 
   14181 /*
   14182  * wm_nvm_read_spi:
   14183  *
    14184  *	Read a word from the EEPROM using the SPI protocol.
   14185  */
   14186 static int
   14187 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14188 {
   14189 	uint32_t reg, val;
   14190 	int i;
   14191 	uint8_t opc;
   14192 	int rv;
   14193 
   14194 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14195 		device_xname(sc->sc_dev), __func__));
   14196 
   14197 	rv = sc->nvm.acquire(sc);
   14198 	if (rv != 0)
   14199 		return rv;
   14200 
   14201 	/* Clear SK and CS. */
   14202 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14203 	CSR_WRITE(sc, WMREG_EECD, reg);
   14204 	CSR_WRITE_FLUSH(sc);
   14205 	delay(2);
   14206 
   14207 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14208 		goto out;
   14209 
   14210 	/* Toggle CS to flush commands. */
   14211 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14212 	CSR_WRITE_FLUSH(sc);
   14213 	delay(2);
   14214 	CSR_WRITE(sc, WMREG_EECD, reg);
   14215 	CSR_WRITE_FLUSH(sc);
   14216 	delay(2);
   14217 
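          	/*
          	 * Small SPI parts use 8-bit addressing; for those, bit 8 of
          	 * the byte address (i.e. word >= 128) is carried in the READ
          	 * opcode as the A8 bit.
          	 */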
   14218 	opc = SPI_OPC_READ;
   14219 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14220 		opc |= SPI_OPC_A8;
   14221 
   14222 	wm_eeprom_sendbits(sc, opc, 8);
   14223 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14224 
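          	/*
          	 * The EEPROM shifts each word out MSB first; swap the two
          	 * bytes to get the NVM's little-endian word order.
          	 */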
   14225 	for (i = 0; i < wordcnt; i++) {
   14226 		wm_eeprom_recvbits(sc, &val, 16);
   14227 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14228 	}
   14229 
   14230 	/* Raise CS and clear SK. */
   14231 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14232 	CSR_WRITE(sc, WMREG_EECD, reg);
   14233 	CSR_WRITE_FLUSH(sc);
   14234 	delay(2);
   14235 
   14236 out:
   14237 	sc->nvm.release(sc);
   14238 	return rv;
   14239 }
   14240 
    14241 /* Reading via the EERD register */
   14242 
   14243 static int
   14244 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14245 {
   14246 	uint32_t attempts = 100000;
   14247 	uint32_t i, reg = 0;
   14248 	int32_t done = -1;
   14249 
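          	/* Poll every 5us; worst case ~500ms before giving up. */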
   14250 	for (i = 0; i < attempts; i++) {
   14251 		reg = CSR_READ(sc, rw);
   14252 
   14253 		if (reg & EERD_DONE) {
   14254 			done = 0;
   14255 			break;
   14256 		}
   14257 		delay(5);
   14258 	}
   14259 
   14260 	return done;
   14261 }
   14262 
   14263 static int
   14264 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14265 {
   14266 	int i, eerd = 0;
   14267 	int rv;
   14268 
   14269 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14270 		device_xname(sc->sc_dev), __func__));
   14271 
   14272 	rv = sc->nvm.acquire(sc);
   14273 	if (rv != 0)
   14274 		return rv;
   14275 
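          	/*
          	 * For each word: write the word address and the START bit
          	 * to EERD, poll until the hardware sets DONE, then pull the
          	 * word out of the data field of the same register.
          	 */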
   14276 	for (i = 0; i < wordcnt; i++) {
   14277 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14278 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14279 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14280 		if (rv != 0) {
   14281 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    14282 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   14283 			break;
   14284 		}
   14285 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14286 	}
   14287 
   14288 	sc->nvm.release(sc);
   14289 	return rv;
   14290 }
   14291 
   14292 /* Flash */
   14293 
   14294 static int
   14295 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14296 {
   14297 	uint32_t eecd;
   14298 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14299 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14300 	uint32_t nvm_dword = 0;
   14301 	uint8_t sig_byte = 0;
   14302 	int rv;
   14303 
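          	/*
          	 * Each flash bank carries a signature in the high byte of
          	 * word ICH_NVM_SIG_WORD; a bank is valid when that byte,
          	 * masked with ICH_NVM_VALID_SIG_MASK, equals
          	 * ICH_NVM_SIG_VALUE.
          	 */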
   14304 	switch (sc->sc_type) {
   14305 	case WM_T_PCH_SPT:
   14306 	case WM_T_PCH_CNP:
   14307 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14308 		act_offset = ICH_NVM_SIG_WORD * 2;
   14309 
   14310 		/* Set bank to 0 in case flash read fails. */
   14311 		*bank = 0;
   14312 
   14313 		/* Check bank 0 */
   14314 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14315 		if (rv != 0)
   14316 			return rv;
   14317 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14318 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14319 			*bank = 0;
   14320 			return 0;
   14321 		}
   14322 
   14323 		/* Check bank 1 */
    14324 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    14325 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    14326 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14327 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14328 			*bank = 1;
   14329 			return 0;
   14330 		}
   14331 		aprint_error_dev(sc->sc_dev,
   14332 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14333 		return -1;
   14334 	case WM_T_ICH8:
   14335 	case WM_T_ICH9:
   14336 		eecd = CSR_READ(sc, WMREG_EECD);
   14337 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14338 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14339 			return 0;
   14340 		}
   14341 		/* FALLTHROUGH */
   14342 	default:
   14343 		/* Default to 0 */
   14344 		*bank = 0;
   14345 
   14346 		/* Check bank 0 */
   14347 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14348 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14349 			*bank = 0;
   14350 			return 0;
   14351 		}
   14352 
   14353 		/* Check bank 1 */
   14354 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14355 		    &sig_byte);
   14356 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14357 			*bank = 1;
   14358 			return 0;
   14359 		}
   14360 	}
   14361 
   14362 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14363 		device_xname(sc->sc_dev)));
   14364 	return -1;
   14365 }
   14366 
   14367 /******************************************************************************
   14368  * This function does initial flash setup so that a new read/write/erase cycle
   14369  * can be started.
   14370  *
   14371  * sc - The pointer to the hw structure
   14372  ****************************************************************************/
   14373 static int32_t
   14374 wm_ich8_cycle_init(struct wm_softc *sc)
   14375 {
   14376 	uint16_t hsfsts;
   14377 	int32_t error = 1;
   14378 	int32_t i     = 0;
   14379 
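          	/*
          	 * On PCH_SPT and newer, the flash registers live in LAN
          	 * memory space and only 32-bit accesses are supported;
          	 * HSFSTS is the low 16 bits of that dword.
          	 */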
   14380 	if (sc->sc_type >= WM_T_PCH_SPT)
   14381 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14382 	else
   14383 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14384 
    14385 	/* Check the Flash Descriptor Valid bit in HW status */
   14386 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14387 		return error;
   14388 
    14389 	/* Clear FCERR and DAEL in HW status by writing 1s */
   14391 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14392 
   14393 	if (sc->sc_type >= WM_T_PCH_SPT)
   14394 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14395 	else
   14396 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14397 
    14398 	/*
    14399 	 * Either we should have a hardware SPI cycle-in-progress bit to
    14400 	 * check against before starting a new cycle, or the FDONE bit
    14401 	 * should be changed in the hardware so that it reads 1 after a
    14402 	 * hardware reset, which could then be used to tell whether a
    14403 	 * cycle is in progress or has completed.  We should also have a
    14404 	 * software semaphore guarding FDONE or the cycle-in-progress
    14405 	 * bit, so that two threads' accesses are serialized and cannot
    14406 	 * start a cycle at the same time.
    14407 	 */
   14408 
   14409 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14410 		/*
   14411 		 * There is no cycle running at present, so we can start a
   14412 		 * cycle
   14413 		 */
   14414 
   14415 		/* Begin by setting Flash Cycle Done. */
   14416 		hsfsts |= HSFSTS_DONE;
   14417 		if (sc->sc_type >= WM_T_PCH_SPT)
   14418 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14419 			    hsfsts & 0xffffUL);
   14420 		else
   14421 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14422 		error = 0;
   14423 	} else {
   14424 		/*
   14425 		 * Otherwise poll for sometime so the current cycle has a
   14426 		 * chance to end before giving up.
   14427 		 */
   14428 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14429 			if (sc->sc_type >= WM_T_PCH_SPT)
   14430 				hsfsts = ICH8_FLASH_READ32(sc,
   14431 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14432 			else
   14433 				hsfsts = ICH8_FLASH_READ16(sc,
   14434 				    ICH_FLASH_HSFSTS);
   14435 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14436 				error = 0;
   14437 				break;
   14438 			}
   14439 			delay(1);
   14440 		}
   14441 		if (error == 0) {
    14442 			/*
    14443 			 * The previous cycle ended before our timeout;
    14444 			 * now set the Flash Cycle Done bit.
    14445 			 */
   14446 			hsfsts |= HSFSTS_DONE;
   14447 			if (sc->sc_type >= WM_T_PCH_SPT)
   14448 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14449 				    hsfsts & 0xffffUL);
   14450 			else
   14451 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14452 				    hsfsts);
   14453 		}
   14454 	}
   14455 	return error;
   14456 }
   14457 
   14458 /******************************************************************************
   14459  * This function starts a flash cycle and waits for its completion
   14460  *
   14461  * sc - The pointer to the hw structure
   14462  ****************************************************************************/
   14463 static int32_t
   14464 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14465 {
   14466 	uint16_t hsflctl;
   14467 	uint16_t hsfsts;
   14468 	int32_t error = 1;
   14469 	uint32_t i = 0;
   14470 
   14471 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14472 	if (sc->sc_type >= WM_T_PCH_SPT)
   14473 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14474 	else
   14475 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14476 	hsflctl |= HSFCTL_GO;
   14477 	if (sc->sc_type >= WM_T_PCH_SPT)
   14478 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14479 		    (uint32_t)hsflctl << 16);
   14480 	else
   14481 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14482 
   14483 	/* Wait till FDONE bit is set to 1 */
   14484 	do {
   14485 		if (sc->sc_type >= WM_T_PCH_SPT)
   14486 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14487 			    & 0xffffUL;
   14488 		else
   14489 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14490 		if (hsfsts & HSFSTS_DONE)
   14491 			break;
   14492 		delay(1);
   14493 		i++;
   14494 	} while (i < timeout);
    14495 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14496 		error = 0;
   14497 
   14498 	return error;
   14499 }
   14500 
   14501 /******************************************************************************
   14502  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14503  *
   14504  * sc - The pointer to the hw structure
   14505  * index - The index of the byte or word to read.
    14506  * size - Size of data to read: 1=byte, 2=word, 4=dword
   14507  * data - Pointer to the word to store the value read.
   14508  *****************************************************************************/
   14509 static int32_t
   14510 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14511     uint32_t size, uint32_t *data)
   14512 {
   14513 	uint16_t hsfsts;
   14514 	uint16_t hsflctl;
   14515 	uint32_t flash_linear_address;
   14516 	uint32_t flash_data = 0;
   14517 	int32_t error = 1;
   14518 	int32_t count = 0;
   14519 
    14520 	if (size < 1 || size > 4 || data == NULL ||
   14521 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14522 		return error;
   14523 
   14524 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14525 	    sc->sc_ich8_flash_base;
   14526 
   14527 	do {
   14528 		delay(1);
   14529 		/* Steps */
   14530 		error = wm_ich8_cycle_init(sc);
   14531 		if (error)
   14532 			break;
   14533 
   14534 		if (sc->sc_type >= WM_T_PCH_SPT)
   14535 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14536 			    >> 16;
   14537 		else
   14538 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14539 		/* The BCOUNT field holds size - 1: 0=1 byte, 1=2 bytes, 3=4 bytes. */
   14540 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14541 		    & HSFCTL_BCOUNT_MASK;
   14542 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14543 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14544 			/*
   14545 			 * In SPT, This register is in Lan memory space, not
   14546 			 * flash. Therefore, only 32 bit access is supported.
   14547 			 */
   14548 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14549 			    (uint32_t)hsflctl << 16);
   14550 		} else
   14551 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14552 
    14553 		/*
    14554 		 * Write the last 24 bits of the index into the Flash Linear
    14555 		 * Address field of the Flash Address register.
    14556 		 */
    14557 		/* TODO: check the index against the size of the flash */
   14558 
   14559 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14560 
   14561 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14562 
    14563 		/*
    14564 		 * If FCERR is set, clear it and retry the whole sequence
    14565 		 * a few more times; otherwise read the data out of Flash
    14566 		 * Data0, least significant byte first.
    14567 		 */
   14569 		if (error == 0) {
   14570 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14571 			if (size == 1)
   14572 				*data = (uint8_t)(flash_data & 0x000000FF);
   14573 			else if (size == 2)
   14574 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14575 			else if (size == 4)
   14576 				*data = (uint32_t)flash_data;
   14577 			break;
   14578 		} else {
   14579 			/*
   14580 			 * If we've gotten here, then things are probably
   14581 			 * completely hosed, but if the error condition is
   14582 			 * detected, it won't hurt to give it another try...
   14583 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14584 			 */
   14585 			if (sc->sc_type >= WM_T_PCH_SPT)
   14586 				hsfsts = ICH8_FLASH_READ32(sc,
   14587 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14588 			else
   14589 				hsfsts = ICH8_FLASH_READ16(sc,
   14590 				    ICH_FLASH_HSFSTS);
   14591 
   14592 			if (hsfsts & HSFSTS_ERR) {
   14593 				/* Repeat for some time before giving up. */
   14594 				continue;
   14595 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14596 				break;
   14597 		}
   14598 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14599 
   14600 	return error;
   14601 }
   14602 
   14603 /******************************************************************************
   14604  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14605  *
   14606  * sc - pointer to wm_hw structure
   14607  * index - The index of the byte to read.
   14608  * data - Pointer to a byte to store the value read.
   14609  *****************************************************************************/
   14610 static int32_t
   14611 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14612 {
   14613 	int32_t status;
   14614 	uint32_t word = 0;
   14615 
   14616 	status = wm_read_ich8_data(sc, index, 1, &word);
   14617 	if (status == 0)
   14618 		*data = (uint8_t)word;
   14619 	else
   14620 		*data = 0;
   14621 
   14622 	return status;
   14623 }
   14624 
   14625 /******************************************************************************
   14626  * Reads a word from the NVM using the ICH8 flash access registers.
   14627  *
   14628  * sc - pointer to wm_hw structure
   14629  * index - The starting byte index of the word to read.
   14630  * data - Pointer to a word to store the value read.
   14631  *****************************************************************************/
   14632 static int32_t
   14633 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14634 {
   14635 	int32_t status;
   14636 	uint32_t word = 0;
   14637 
   14638 	status = wm_read_ich8_data(sc, index, 2, &word);
   14639 	if (status == 0)
   14640 		*data = (uint16_t)word;
   14641 	else
   14642 		*data = 0;
   14643 
   14644 	return status;
   14645 }
   14646 
   14647 /******************************************************************************
   14648  * Reads a dword from the NVM using the ICH8 flash access registers.
   14649  *
   14650  * sc - pointer to wm_hw structure
    14651  * index - The starting byte index of the dword to read.
    14652  * data - Pointer to a dword to store the value read.
   14653  *****************************************************************************/
   14654 static int32_t
   14655 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14656 {
   14657 	int32_t status;
   14658 
   14659 	status = wm_read_ich8_data(sc, index, 4, data);
   14660 	return status;
   14661 }
   14662 
   14663 /******************************************************************************
   14664  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14665  * register.
   14666  *
   14667  * sc - Struct containing variables accessed by shared code
   14668  * offset - offset of word in the EEPROM to read
   14669  * data - word read from the EEPROM
   14670  * words - number of words to read
   14671  *****************************************************************************/
   14672 static int
   14673 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14674 {
   14675 	int rv;
   14676 	uint32_t flash_bank = 0;
   14677 	uint32_t act_offset = 0;
   14678 	uint32_t bank_offset = 0;
   14679 	uint16_t word = 0;
   14680 	uint16_t i = 0;
   14681 
   14682 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14683 		device_xname(sc->sc_dev), __func__));
   14684 
   14685 	rv = sc->nvm.acquire(sc);
   14686 	if (rv != 0)
   14687 		return rv;
   14688 
   14689 	/*
   14690 	 * We need to know which is the valid flash bank.  In the event
   14691 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14692 	 * managing flash_bank. So it cannot be trusted and needs
   14693 	 * to be updated with each read.
   14694 	 */
   14695 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14696 	if (rv) {
   14697 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14698 			device_xname(sc->sc_dev)));
   14699 		flash_bank = 0;
   14700 	}
   14701 
   14702 	/*
   14703 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14704 	 * size
   14705 	 */
   14706 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14707 
   14708 	for (i = 0; i < words; i++) {
   14709 		/* The NVM part needs a byte offset, hence * 2 */
   14710 		act_offset = bank_offset + ((offset + i) * 2);
   14711 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14712 		if (rv) {
   14713 			aprint_error_dev(sc->sc_dev,
   14714 			    "%s: failed to read NVM\n", __func__);
   14715 			break;
   14716 		}
   14717 		data[i] = word;
   14718 	}
   14719 
   14720 	sc->nvm.release(sc);
   14721 	return rv;
   14722 }
   14723 
   14724 /******************************************************************************
   14725  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14726  * register.
   14727  *
   14728  * sc - Struct containing variables accessed by shared code
   14729  * offset - offset of word in the EEPROM to read
   14730  * data - word read from the EEPROM
   14731  * words - number of words to read
   14732  *****************************************************************************/
   14733 static int
   14734 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14735 {
   14736 	int	 rv;
   14737 	uint32_t flash_bank = 0;
   14738 	uint32_t act_offset = 0;
   14739 	uint32_t bank_offset = 0;
   14740 	uint32_t dword = 0;
   14741 	uint16_t i = 0;
   14742 
   14743 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14744 		device_xname(sc->sc_dev), __func__));
   14745 
   14746 	rv = sc->nvm.acquire(sc);
   14747 	if (rv != 0)
   14748 		return rv;
   14749 
   14750 	/*
   14751 	 * We need to know which is the valid flash bank.  In the event
   14752 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14753 	 * managing flash_bank. So it cannot be trusted and needs
   14754 	 * to be updated with each read.
   14755 	 */
   14756 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14757 	if (rv) {
   14758 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14759 			device_xname(sc->sc_dev)));
   14760 		flash_bank = 0;
   14761 	}
   14762 
   14763 	/*
   14764 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14765 	 * size
   14766 	 */
   14767 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14768 
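          	/*
          	 * E.g., word offset 3 in bank 0 becomes byte offset 6: read
          	 * the dword at byte offset 4 and take its high word.
          	 */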
   14769 	for (i = 0; i < words; i++) {
   14770 		/* The NVM part needs a byte offset, hence * 2 */
   14771 		act_offset = bank_offset + ((offset + i) * 2);
   14772 		/* but we must read dword aligned, so mask ... */
   14773 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14774 		if (rv) {
   14775 			aprint_error_dev(sc->sc_dev,
   14776 			    "%s: failed to read NVM\n", __func__);
   14777 			break;
   14778 		}
   14779 		/* ... and pick out low or high word */
   14780 		if ((act_offset & 0x2) == 0)
   14781 			data[i] = (uint16_t)(dword & 0xFFFF);
   14782 		else
   14783 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14784 	}
   14785 
   14786 	sc->nvm.release(sc);
   14787 	return rv;
   14788 }
   14789 
   14790 /* iNVM */
   14791 
   14792 static int
   14793 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14794 {
    14795 	int32_t	 rv = -1;	/* "not found" until a matching record is seen */
   14796 	uint32_t invm_dword;
   14797 	uint16_t i;
   14798 	uint8_t record_type, word_address;
   14799 
   14800 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14801 		device_xname(sc->sc_dev), __func__));
   14802 
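          	/*
          	 * Scan the iNVM dword array: skip over CSR- and RSA-key-
          	 * autoload records, stop at the first uninitialized record,
          	 * and return the data of the word-autoload record that
          	 * matches 'address'.
          	 */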
   14803 	for (i = 0; i < INVM_SIZE; i++) {
   14804 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14805 		/* Get record type */
   14806 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14807 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14808 			break;
   14809 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14810 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14811 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14812 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14813 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14814 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14815 			if (word_address == address) {
   14816 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14817 				rv = 0;
   14818 				break;
   14819 			}
   14820 		}
   14821 	}
   14822 
   14823 	return rv;
   14824 }
   14825 
   14826 static int
   14827 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14828 {
   14829 	int i, rv;
   14830 
   14831 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14832 		device_xname(sc->sc_dev), __func__));
   14833 
   14834 	rv = sc->nvm.acquire(sc);
   14835 	if (rv != 0)
   14836 		return rv;
   14837 
   14838 	for (i = 0; i < words; i++) {
   14839 		switch (offset + i) {
   14840 		case NVM_OFF_MACADDR:
   14841 		case NVM_OFF_MACADDR1:
   14842 		case NVM_OFF_MACADDR2:
   14843 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14844 			if (rv != 0) {
   14845 				data[i] = 0xffff;
   14846 				rv = -1;
   14847 			}
   14848 			break;
   14849 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14850 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14851 			if (rv != 0) {
   14852 				*data = INVM_DEFAULT_AL;
   14853 				rv = 0;
   14854 			}
   14855 			break;
   14856 		case NVM_OFF_CFG2:
   14857 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14858 			if (rv != 0) {
   14859 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14860 				rv = 0;
   14861 			}
   14862 			break;
   14863 		case NVM_OFF_CFG4:
   14864 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14865 			if (rv != 0) {
   14866 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14867 				rv = 0;
   14868 			}
   14869 			break;
   14870 		case NVM_OFF_LED_1_CFG:
   14871 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14872 			if (rv != 0) {
   14873 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14874 				rv = 0;
   14875 			}
   14876 			break;
   14877 		case NVM_OFF_LED_0_2_CFG:
   14878 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14879 			if (rv != 0) {
   14880 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14881 				rv = 0;
   14882 			}
   14883 			break;
   14884 		case NVM_OFF_ID_LED_SETTINGS:
   14885 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14886 			if (rv != 0) {
   14887 				*data = ID_LED_RESERVED_FFFF;
   14888 				rv = 0;
   14889 			}
   14890 			break;
   14891 		default:
   14892 			DPRINTF(sc, WM_DEBUG_NVM,
   14893 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14894 			*data = NVM_RESERVED_WORD;
   14895 			break;
   14896 		}
   14897 	}
   14898 
   14899 	sc->nvm.release(sc);
   14900 	return rv;
   14901 }
   14902 
    14903 /* Locking, NVM type detection, checksum validation, versioning and reading */
   14904 
   14905 static int
   14906 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14907 {
   14908 	uint32_t eecd = 0;
   14909 
   14910 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14911 	    || sc->sc_type == WM_T_82583) {
   14912 		eecd = CSR_READ(sc, WMREG_EECD);
   14913 
   14914 		/* Isolate bits 15 & 16 */
   14915 		eecd = ((eecd >> 15) & 0x03);
   14916 
   14917 		/* If both bits are set, device is Flash type */
   14918 		if (eecd == 0x03)
   14919 			return 0;
   14920 	}
   14921 	return 1;
   14922 }
   14923 
   14924 static int
   14925 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14926 {
   14927 	uint32_t eec;
   14928 
   14929 	eec = CSR_READ(sc, WMREG_EEC);
   14930 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14931 		return 1;
   14932 
   14933 	return 0;
   14934 }
   14935 
   14936 /*
   14937  * wm_nvm_validate_checksum
   14938  *
    14939  * The checksum is defined so that the first 64 (16 bit) words sum
           * to NVM_CHECKSUM.
   14940  */
   14941 static int
   14942 wm_nvm_validate_checksum(struct wm_softc *sc)
   14943 {
   14944 	uint16_t checksum;
   14945 	uint16_t eeprom_data;
   14946 #ifdef WM_DEBUG
   14947 	uint16_t csum_wordaddr, valid_checksum;
   14948 #endif
   14949 	int i;
   14950 
   14951 	checksum = 0;
   14952 
   14953 	/* Don't check for I211 */
   14954 	if (sc->sc_type == WM_T_I211)
   14955 		return 0;
   14956 
   14957 #ifdef WM_DEBUG
   14958 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14959 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14960 		csum_wordaddr = NVM_OFF_COMPAT;
   14961 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14962 	} else {
   14963 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14964 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14965 	}
   14966 
   14967 	/* Dump EEPROM image for debug */
   14968 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14969 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14970 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14971 		/* XXX PCH_SPT? */
   14972 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14973 		if ((eeprom_data & valid_checksum) == 0)
   14974 			DPRINTF(sc, WM_DEBUG_NVM,
   14975 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14976 				device_xname(sc->sc_dev), eeprom_data,
   14977 				valid_checksum));
   14978 	}
   14979 
   14980 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14981 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14982 		for (i = 0; i < NVM_SIZE; i++) {
   14983 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14984 				printf("XXXX ");
   14985 			else
   14986 				printf("%04hx ", eeprom_data);
   14987 			if (i % 8 == 7)
   14988 				printf("\n");
   14989 		}
   14990 	}
   14991 
   14992 #endif /* WM_DEBUG */
   14993 
   14994 	for (i = 0; i < NVM_SIZE; i++) {
   14995 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14996 			return -1;
   14997 		checksum += eeprom_data;
   14998 	}
   14999 
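          	/*
          	 * A mismatch is only reported under WM_DEBUG; it is
          	 * deliberately not treated as fatal.
          	 */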
   15000 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15001 #ifdef WM_DEBUG
   15002 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15003 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15004 #endif
   15005 	}
   15006 
   15007 	return 0;
   15008 }
   15009 
   15010 static void
   15011 wm_nvm_version_invm(struct wm_softc *sc)
   15012 {
   15013 	uint32_t dword;
   15014 
   15015 	/*
    15016 	 * Linux's code to decode the version is very strange, so we
    15017 	 * don't follow that algorithm and just use word 61 as the
    15018 	 * document describes.  Perhaps it's not perfect though...
   15019 	 *
   15020 	 * Example:
   15021 	 *
   15022 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15023 	 */
   15024 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15025 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15026 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15027 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15028 }
   15029 
   15030 static void
   15031 wm_nvm_version(struct wm_softc *sc)
   15032 {
   15033 	uint16_t major, minor, build, patch;
   15034 	uint16_t uid0, uid1;
   15035 	uint16_t nvm_data;
   15036 	uint16_t off;
   15037 	bool check_version = false;
   15038 	bool check_optionrom = false;
   15039 	bool have_build = false;
   15040 	bool have_uid = true;
   15041 
   15042 	/*
   15043 	 * Version format:
   15044 	 *
   15045 	 * XYYZ
   15046 	 * X0YZ
   15047 	 * X0YY
   15048 	 *
   15049 	 * Example:
   15050 	 *
   15051 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15052 	 *	82571	0x50a6	5.10.6?
   15053 	 *	82572	0x506a	5.6.10?
   15054 	 *	82572EI	0x5069	5.6.9?
   15055 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15056 	 *		0x2013	2.1.3?
   15057 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15058 	 * ICH8+82567	0x0040	0.4.0?
   15059 	 * ICH9+82566	0x1040	1.4.0?
   15060 	 *ICH10+82567	0x0043	0.4.3?
   15061 	 *  PCH+82577	0x00c1	0.12.1?
   15062 	 * PCH2+82579	0x00d3	0.13.3?
   15063 	 *		0x00d4	0.13.4?
   15064 	 *  LPT+I218	0x0023	0.2.3?
   15065 	 *  SPT+I219	0x0084	0.8.4?
   15066 	 *  CNP+I219	0x0054	0.5.4?
   15067 	 */
   15068 
   15069 	/*
   15070 	 * XXX
    15071 	 * The SPI ROM of qemu's e1000e emulation (82574L) has only
    15072 	 * 64 words.  I've never seen real 82574 hardware with such a
           	 * small SPI ROM.
   15073 	 */
   15074 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15075 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15076 		have_uid = false;
   15077 
   15078 	switch (sc->sc_type) {
   15079 	case WM_T_82571:
   15080 	case WM_T_82572:
   15081 	case WM_T_82574:
   15082 	case WM_T_82583:
   15083 		check_version = true;
   15084 		check_optionrom = true;
   15085 		have_build = true;
   15086 		break;
   15087 	case WM_T_ICH8:
   15088 	case WM_T_ICH9:
   15089 	case WM_T_ICH10:
   15090 	case WM_T_PCH:
   15091 	case WM_T_PCH2:
   15092 	case WM_T_PCH_LPT:
   15093 	case WM_T_PCH_SPT:
   15094 	case WM_T_PCH_CNP:
   15095 		check_version = true;
   15096 		have_build = true;
   15097 		have_uid = false;
   15098 		break;
   15099 	case WM_T_82575:
   15100 	case WM_T_82576:
   15101 	case WM_T_82580:
   15102 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15103 			check_version = true;
   15104 		break;
   15105 	case WM_T_I211:
   15106 		wm_nvm_version_invm(sc);
   15107 		have_uid = false;
   15108 		goto printver;
   15109 	case WM_T_I210:
   15110 		if (!wm_nvm_flash_presence_i210(sc)) {
   15111 			wm_nvm_version_invm(sc);
   15112 			have_uid = false;
   15113 			goto printver;
   15114 		}
   15115 		/* FALLTHROUGH */
   15116 	case WM_T_I350:
   15117 	case WM_T_I354:
   15118 		check_version = true;
   15119 		check_optionrom = true;
   15120 		break;
   15121 	default:
   15122 		return;
   15123 	}
   15124 	if (check_version
   15125 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15126 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15127 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15128 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15129 			build = nvm_data & NVM_BUILD_MASK;
   15130 			have_build = true;
   15131 		} else
   15132 			minor = nvm_data & 0x00ff;
   15133 
    15134 		/* Convert the hex-coded minor (e.g., 0x10) to decimal (10) */
   15135 		minor = (minor / 16) * 10 + (minor % 16);
   15136 		sc->sc_nvm_ver_major = major;
   15137 		sc->sc_nvm_ver_minor = minor;
   15138 
   15139 printver:
   15140 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15141 		    sc->sc_nvm_ver_minor);
   15142 		if (have_build) {
   15143 			sc->sc_nvm_ver_build = build;
   15144 			aprint_verbose(".%d", build);
   15145 		}
   15146 	}
   15147 
    15148 	/* Assume the Option ROM area is above NVM_SIZE */
   15149 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15150 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15151 		/* Option ROM Version */
   15152 		if ((off != 0x0000) && (off != 0xffff)) {
   15153 			int rv;
   15154 
   15155 			off += NVM_COMBO_VER_OFF;
   15156 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15157 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15158 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15159 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   15160 				/* 16bits */
   15161 				major = uid0 >> 8;
   15162 				build = (uid0 << 8) | (uid1 >> 8);
   15163 				patch = uid1 & 0x00ff;
   15164 				aprint_verbose(", option ROM Version %d.%d.%d",
   15165 				    major, build, patch);
   15166 			}
   15167 		}
   15168 	}
   15169 
   15170 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15171 		aprint_verbose(", Image Unique ID %08x",
   15172 		    ((uint32_t)uid1 << 16) | uid0);
   15173 }
   15174 
   15175 /*
   15176  * wm_nvm_read:
   15177  *
   15178  *	Read data from the serial EEPROM.
   15179  */
   15180 static int
   15181 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15182 {
   15183 	int rv;
   15184 
   15185 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15186 		device_xname(sc->sc_dev), __func__));
   15187 
   15188 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15189 		return -1;
   15190 
   15191 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15192 
   15193 	return rv;
   15194 }
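          
          /*
           * Typical usage (a sketch; 'myea' is a hypothetical local): read the
           * three words holding the MAC address, assuming the NVM backend was
           * selected at attach time:
           *
           *	uint16_t myea[3];
           *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
           *		... the EEPROM is invalid or unreadable ...
           */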
   15195 
   15196 /*
   15197  * Hardware semaphores.
    15198  * Very complex...
   15199  */
   15200 
   15201 static int
   15202 wm_get_null(struct wm_softc *sc)
   15203 {
   15204 
   15205 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15206 		device_xname(sc->sc_dev), __func__));
   15207 	return 0;
   15208 }
   15209 
   15210 static void
   15211 wm_put_null(struct wm_softc *sc)
   15212 {
   15213 
   15214 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15215 		device_xname(sc->sc_dev), __func__));
   15216 	return;
   15217 }
   15218 
   15219 static int
   15220 wm_get_eecd(struct wm_softc *sc)
   15221 {
   15222 	uint32_t reg;
   15223 	int x;
   15224 
   15225 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15226 		device_xname(sc->sc_dev), __func__));
   15227 
   15228 	reg = CSR_READ(sc, WMREG_EECD);
   15229 
   15230 	/* Request EEPROM access. */
   15231 	reg |= EECD_EE_REQ;
   15232 	CSR_WRITE(sc, WMREG_EECD, reg);
   15233 
    15234 	/* ... and wait up to ~5ms for it to be granted. */
   15235 	for (x = 0; x < 1000; x++) {
   15236 		reg = CSR_READ(sc, WMREG_EECD);
   15237 		if (reg & EECD_EE_GNT)
   15238 			break;
   15239 		delay(5);
   15240 	}
   15241 	if ((reg & EECD_EE_GNT) == 0) {
   15242 		aprint_error_dev(sc->sc_dev,
   15243 		    "could not acquire EEPROM GNT\n");
   15244 		reg &= ~EECD_EE_REQ;
   15245 		CSR_WRITE(sc, WMREG_EECD, reg);
   15246 		return -1;
   15247 	}
   15248 
   15249 	return 0;
   15250 }
   15251 
   15252 static void
   15253 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15254 {
   15255 
   15256 	*eecd |= EECD_SK;
   15257 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15258 	CSR_WRITE_FLUSH(sc);
   15259 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15260 		delay(1);
   15261 	else
   15262 		delay(50);
   15263 }
   15264 
   15265 static void
   15266 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15267 {
   15268 
   15269 	*eecd &= ~EECD_SK;
   15270 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15271 	CSR_WRITE_FLUSH(sc);
   15272 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15273 		delay(1);
   15274 	else
   15275 		delay(50);
   15276 }
   15277 
   15278 static void
   15279 wm_put_eecd(struct wm_softc *sc)
   15280 {
   15281 	uint32_t reg;
   15282 
   15283 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15284 		device_xname(sc->sc_dev), __func__));
   15285 
   15286 	/* Stop nvm */
   15287 	reg = CSR_READ(sc, WMREG_EECD);
   15288 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15289 		/* Pull CS high */
   15290 		reg |= EECD_CS;
   15291 		wm_nvm_eec_clock_lower(sc, &reg);
   15292 	} else {
   15293 		/* CS on Microwire is active-high */
   15294 		reg &= ~(EECD_CS | EECD_DI);
   15295 		CSR_WRITE(sc, WMREG_EECD, reg);
   15296 		wm_nvm_eec_clock_raise(sc, &reg);
   15297 		wm_nvm_eec_clock_lower(sc, &reg);
   15298 	}
   15299 
   15300 	reg = CSR_READ(sc, WMREG_EECD);
   15301 	reg &= ~EECD_EE_REQ;
   15302 	CSR_WRITE(sc, WMREG_EECD, reg);
   15303 
   15304 	return;
   15305 }
   15306 
   15307 /*
   15308  * Get hardware semaphore.
   15309  * Same as e1000_get_hw_semaphore_generic()
   15310  */
   15311 static int
   15312 wm_get_swsm_semaphore(struct wm_softc *sc)
   15313 {
   15314 	int32_t timeout;
   15315 	uint32_t swsm;
   15316 
   15317 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15318 		device_xname(sc->sc_dev), __func__));
   15319 	KASSERT(sc->sc_nvm_wordsize > 0);
   15320 
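          	/*
          	 * Two-stage handshake: wait for the hardware-arbitrated SMBI
          	 * bit to be clear, then set SWESMBI and read it back to
          	 * confirm ownership against the firmware.
          	 */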
   15321 retry:
   15322 	/* Get the SW semaphore. */
   15323 	timeout = sc->sc_nvm_wordsize + 1;
   15324 	while (timeout) {
   15325 		swsm = CSR_READ(sc, WMREG_SWSM);
   15326 
   15327 		if ((swsm & SWSM_SMBI) == 0)
   15328 			break;
   15329 
   15330 		delay(50);
   15331 		timeout--;
   15332 	}
   15333 
   15334 	if (timeout == 0) {
   15335 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15336 			/*
   15337 			 * In rare circumstances, the SW semaphore may already
   15338 			 * be held unintentionally. Clear the semaphore once
   15339 			 * before giving up.
   15340 			 */
   15341 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15342 			wm_put_swsm_semaphore(sc);
   15343 			goto retry;
   15344 		}
   15345 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15346 		return -1;
   15347 	}
   15348 
   15349 	/* Get the FW semaphore. */
   15350 	timeout = sc->sc_nvm_wordsize + 1;
   15351 	while (timeout) {
   15352 		swsm = CSR_READ(sc, WMREG_SWSM);
   15353 		swsm |= SWSM_SWESMBI;
   15354 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15355 		/* If we managed to set the bit we got the semaphore. */
   15356 		swsm = CSR_READ(sc, WMREG_SWSM);
   15357 		if (swsm & SWSM_SWESMBI)
   15358 			break;
   15359 
   15360 		delay(50);
   15361 		timeout--;
   15362 	}
   15363 
   15364 	if (timeout == 0) {
   15365 		aprint_error_dev(sc->sc_dev,
   15366 		    "could not acquire SWSM SWESMBI\n");
   15367 		/* Release semaphores */
   15368 		wm_put_swsm_semaphore(sc);
   15369 		return -1;
   15370 	}
   15371 	return 0;
   15372 }
   15373 
   15374 /*
   15375  * Put hardware semaphore.
   15376  * Same as e1000_put_hw_semaphore_generic()
   15377  */
   15378 static void
   15379 wm_put_swsm_semaphore(struct wm_softc *sc)
   15380 {
   15381 	uint32_t swsm;
   15382 
   15383 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15384 		device_xname(sc->sc_dev), __func__));
   15385 
   15386 	swsm = CSR_READ(sc, WMREG_SWSM);
   15387 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15388 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15389 }
   15390 
   15391 /*
   15392  * Get SW/FW semaphore.
   15393  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15394  */
   15395 static int
   15396 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15397 {
   15398 	uint32_t swfw_sync;
   15399 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15400 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15401 	int timeout;
   15402 
   15403 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15404 		device_xname(sc->sc_dev), __func__));
   15405 
   15406 	if (sc->sc_type == WM_T_80003)
   15407 		timeout = 50;
   15408 	else
   15409 		timeout = 200;
   15410 
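          	/*
          	 * Under the SWSM semaphore, test both the SW and FW claim
          	 * bits for this resource and set the SW bit only when both
          	 * are clear.
          	 */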
   15411 	while (timeout) {
   15412 		if (wm_get_swsm_semaphore(sc)) {
   15413 			aprint_error_dev(sc->sc_dev,
   15414 			    "%s: failed to get semaphore\n",
   15415 			    __func__);
   15416 			return -1;
   15417 		}
   15418 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15419 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15420 			swfw_sync |= swmask;
   15421 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15422 			wm_put_swsm_semaphore(sc);
   15423 			return 0;
   15424 		}
   15425 		wm_put_swsm_semaphore(sc);
   15426 		delay(5000);
   15427 		timeout--;
   15428 	}
   15429 	device_printf(sc->sc_dev,
   15430 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15431 	    mask, swfw_sync);
   15432 	return -1;
   15433 }
   15434 
   15435 static void
   15436 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15437 {
   15438 	uint32_t swfw_sync;
   15439 
   15440 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15441 		device_xname(sc->sc_dev), __func__));
   15442 
   15443 	while (wm_get_swsm_semaphore(sc) != 0)
   15444 		continue;
   15445 
   15446 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15447 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15448 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15449 
   15450 	wm_put_swsm_semaphore(sc);
   15451 }
   15452 
   15453 static int
   15454 wm_get_nvm_80003(struct wm_softc *sc)
   15455 {
   15456 	int rv;
   15457 
   15458 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15459 		device_xname(sc->sc_dev), __func__));
   15460 
   15461 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15462 		aprint_error_dev(sc->sc_dev,
   15463 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15464 		return rv;
   15465 	}
   15466 
   15467 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15468 	    && (rv = wm_get_eecd(sc)) != 0) {
   15469 		aprint_error_dev(sc->sc_dev,
   15470 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15471 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15472 		return rv;
   15473 	}
   15474 
   15475 	return 0;
   15476 }
   15477 
   15478 static void
   15479 wm_put_nvm_80003(struct wm_softc *sc)
   15480 {
   15481 
   15482 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15483 		device_xname(sc->sc_dev), __func__));
   15484 
   15485 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15486 		wm_put_eecd(sc);
   15487 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15488 }
   15489 
   15490 static int
   15491 wm_get_nvm_82571(struct wm_softc *sc)
   15492 {
   15493 	int rv;
   15494 
   15495 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15496 		device_xname(sc->sc_dev), __func__));
   15497 
   15498 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15499 		return rv;
   15500 
   15501 	switch (sc->sc_type) {
   15502 	case WM_T_82573:
   15503 		break;
   15504 	default:
   15505 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15506 			rv = wm_get_eecd(sc);
   15507 		break;
   15508 	}
   15509 
   15510 	if (rv != 0) {
   15511 		aprint_error_dev(sc->sc_dev,
   15512 		    "%s: failed to get semaphore\n",
   15513 		    __func__);
   15514 		wm_put_swsm_semaphore(sc);
   15515 	}
   15516 
   15517 	return rv;
   15518 }
   15519 
   15520 static void
   15521 wm_put_nvm_82571(struct wm_softc *sc)
   15522 {
   15523 
   15524 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15525 		device_xname(sc->sc_dev), __func__));
   15526 
   15527 	switch (sc->sc_type) {
   15528 	case WM_T_82573:
   15529 		break;
   15530 	default:
   15531 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15532 			wm_put_eecd(sc);
   15533 		break;
   15534 	}
   15535 
   15536 	wm_put_swsm_semaphore(sc);
   15537 }
   15538 
   15539 static int
   15540 wm_get_phy_82575(struct wm_softc *sc)
   15541 {
   15542 
   15543 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15544 		device_xname(sc->sc_dev), __func__));
   15545 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15546 }
   15547 
   15548 static void
   15549 wm_put_phy_82575(struct wm_softc *sc)
   15550 {
   15551 
   15552 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15553 		device_xname(sc->sc_dev), __func__));
   15554 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15555 }
   15556 
   15557 static int
   15558 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15559 {
   15560 	uint32_t ext_ctrl;
    15561 	int timeout;
   15562 
   15563 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15564 		device_xname(sc->sc_dev), __func__));
   15565 
   15566 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15567 	for (timeout = 0; timeout < 200; timeout++) {
   15568 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15569 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15570 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15571 
   15572 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15573 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15574 			return 0;
   15575 		delay(5000);
   15576 	}
   15577 	device_printf(sc->sc_dev,
   15578 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15579 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15580 	return -1;
   15581 }
   15582 
   15583 static void
   15584 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15585 {
   15586 	uint32_t ext_ctrl;
   15587 
   15588 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15589 		device_xname(sc->sc_dev), __func__));
   15590 
   15591 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15592 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15593 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15594 
   15595 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15596 }
   15597 
   15598 static int
   15599 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15600 {
   15601 	uint32_t ext_ctrl;
   15602 	int timeout;
   15603 
   15604 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15605 		device_xname(sc->sc_dev), __func__));
   15606 	mutex_enter(sc->sc_ich_phymtx);
   15607 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15608 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15609 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15610 			break;
   15611 		delay(1000);
   15612 	}
   15613 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15614 		device_printf(sc->sc_dev,
   15615 		    "SW has already locked the resource\n");
   15616 		goto out;
   15617 	}
   15618 
   15619 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15620 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15621 	for (timeout = 0; timeout < 1000; timeout++) {
   15622 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15623 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15624 			break;
   15625 		delay(1000);
   15626 	}
   15627 	if (timeout >= 1000) {
   15628 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15629 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15630 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15631 		goto out;
   15632 	}
   15633 	return 0;
   15634 
   15635 out:
   15636 	mutex_exit(sc->sc_ich_phymtx);
   15637 	return -1;
   15638 }
   15639 
   15640 static void
   15641 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15642 {
   15643 	uint32_t ext_ctrl;
   15644 
   15645 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15646 		device_xname(sc->sc_dev), __func__));
   15647 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15648 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15649 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15650 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15651 	} else
   15652 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15653 
   15654 	mutex_exit(sc->sc_ich_phymtx);
   15655 }
   15656 
   15657 static int
   15658 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15659 {
   15660 
   15661 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15662 		device_xname(sc->sc_dev), __func__));
   15663 	mutex_enter(sc->sc_ich_nvmmtx);
   15664 
   15665 	return 0;
   15666 }
   15667 
   15668 static void
   15669 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15670 {
   15671 
   15672 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15673 		device_xname(sc->sc_dev), __func__));
   15674 	mutex_exit(sc->sc_ich_nvmmtx);
   15675 }
   15676 
   15677 static int
   15678 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15679 {
   15680 	int i = 0;
   15681 	uint32_t reg;
   15682 
   15683 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15684 		device_xname(sc->sc_dev), __func__));
   15685 
   15686 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15687 	do {
   15688 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15689 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15690 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15691 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15692 			break;
   15693 		delay(2*1000);
   15694 		i++;
   15695 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15696 
   15697 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15698 		wm_put_hw_semaphore_82573(sc);
   15699 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15700 		    device_xname(sc->sc_dev));
   15701 		return -1;
   15702 	}
   15703 
   15704 	return 0;
   15705 }
   15706 
   15707 static void
   15708 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15709 {
   15710 	uint32_t reg;
   15711 
   15712 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15713 		device_xname(sc->sc_dev), __func__));
   15714 
   15715 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15716 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15717 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15718 }
   15719 
   15720 /*
   15721  * Management mode and power management related subroutines.
   15722  * BMC, AMT, suspend/resume and EEE.
   15723  */
   15724 
   15725 #ifdef WM_WOL
   15726 static int
   15727 wm_check_mng_mode(struct wm_softc *sc)
   15728 {
   15729 	int rv;
   15730 
   15731 	switch (sc->sc_type) {
   15732 	case WM_T_ICH8:
   15733 	case WM_T_ICH9:
   15734 	case WM_T_ICH10:
   15735 	case WM_T_PCH:
   15736 	case WM_T_PCH2:
   15737 	case WM_T_PCH_LPT:
   15738 	case WM_T_PCH_SPT:
   15739 	case WM_T_PCH_CNP:
   15740 		rv = wm_check_mng_mode_ich8lan(sc);
   15741 		break;
   15742 	case WM_T_82574:
   15743 	case WM_T_82583:
   15744 		rv = wm_check_mng_mode_82574(sc);
   15745 		break;
   15746 	case WM_T_82571:
   15747 	case WM_T_82572:
   15748 	case WM_T_82573:
   15749 	case WM_T_80003:
   15750 		rv = wm_check_mng_mode_generic(sc);
   15751 		break;
   15752 	default:
    15753 		/* Nothing to do */
   15754 		rv = 0;
   15755 		break;
   15756 	}
   15757 
   15758 	return rv;
   15759 }
   15760 
   15761 static int
   15762 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15763 {
   15764 	uint32_t fwsm;
   15765 
   15766 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15767 
   15768 	if (((fwsm & FWSM_FW_VALID) != 0)
   15769 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15770 		return 1;
   15771 
   15772 	return 0;
   15773 }
   15774 
   15775 static int
   15776 wm_check_mng_mode_82574(struct wm_softc *sc)
   15777 {
   15778 	uint16_t data;
   15779 
   15780 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15781 
   15782 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15783 		return 1;
   15784 
   15785 	return 0;
   15786 }
   15787 
   15788 static int
   15789 wm_check_mng_mode_generic(struct wm_softc *sc)
   15790 {
   15791 	uint32_t fwsm;
   15792 
   15793 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15794 
   15795 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15796 		return 1;
   15797 
   15798 	return 0;
   15799 }
   15800 #endif /* WM_WOL */
   15801 
   15802 static int
   15803 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15804 {
   15805 	uint32_t manc, fwsm, factps;
   15806 
   15807 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15808 		return 0;
   15809 
   15810 	manc = CSR_READ(sc, WMREG_MANC);
   15811 
   15812 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15813 		device_xname(sc->sc_dev), manc));
   15814 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15815 		return 0;
   15816 
   15817 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15818 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15819 		factps = CSR_READ(sc, WMREG_FACTPS);
   15820 		if (((factps & FACTPS_MNGCG) == 0)
   15821 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15822 			return 1;
    15823 	} else if ((sc->sc_type == WM_T_82574)
          	    || (sc->sc_type == WM_T_82583)) {
   15824 		uint16_t data;
   15825 
   15826 		factps = CSR_READ(sc, WMREG_FACTPS);
   15827 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15828 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15829 			device_xname(sc->sc_dev), factps, data));
   15830 		if (((factps & FACTPS_MNGCG) == 0)
   15831 		    && ((data & NVM_CFG2_MNGM_MASK)
   15832 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15833 			return 1;
   15834 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15835 	    && ((manc & MANC_ASF_EN) == 0))
   15836 		return 1;
   15837 
   15838 	return 0;
   15839 }
   15840 
   15841 static bool
   15842 wm_phy_resetisblocked(struct wm_softc *sc)
   15843 {
   15844 	bool blocked = false;
   15845 	uint32_t reg;
   15846 	int i = 0;
   15847 
   15848 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15849 		device_xname(sc->sc_dev), __func__));
   15850 
   15851 	switch (sc->sc_type) {
   15852 	case WM_T_ICH8:
   15853 	case WM_T_ICH9:
   15854 	case WM_T_ICH10:
   15855 	case WM_T_PCH:
   15856 	case WM_T_PCH2:
   15857 	case WM_T_PCH_LPT:
   15858 	case WM_T_PCH_SPT:
   15859 	case WM_T_PCH_CNP:
   15860 		do {
   15861 			reg = CSR_READ(sc, WMREG_FWSM);
   15862 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15863 				blocked = true;
   15864 				delay(10*1000);
   15865 				continue;
   15866 			}
   15867 			blocked = false;
   15868 		} while (blocked && (i++ < 30));
   15869 		return blocked;
   15871 	case WM_T_82571:
   15872 	case WM_T_82572:
   15873 	case WM_T_82573:
   15874 	case WM_T_82574:
   15875 	case WM_T_82583:
   15876 	case WM_T_80003:
   15877 		reg = CSR_READ(sc, WMREG_MANC);
    15878 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   15883 	default:
   15884 		/* No problem */
   15885 		break;
   15886 	}
   15887 
   15888 	return false;
   15889 }
   15890 
   15891 static void
   15892 wm_get_hw_control(struct wm_softc *sc)
   15893 {
   15894 	uint32_t reg;
   15895 
   15896 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15897 		device_xname(sc->sc_dev), __func__));
   15898 
   15899 	if (sc->sc_type == WM_T_82573) {
   15900 		reg = CSR_READ(sc, WMREG_SWSM);
   15901 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15902 	} else if (sc->sc_type >= WM_T_82571) {
   15903 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15904 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15905 	}
   15906 }
   15907 
   15908 static void
   15909 wm_release_hw_control(struct wm_softc *sc)
   15910 {
   15911 	uint32_t reg;
   15912 
   15913 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15914 		device_xname(sc->sc_dev), __func__));
   15915 
   15916 	if (sc->sc_type == WM_T_82573) {
   15917 		reg = CSR_READ(sc, WMREG_SWSM);
   15918 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15919 	} else if (sc->sc_type >= WM_T_82571) {
   15920 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15921 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15922 	}
   15923 }
   15924 
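/*
 * Gate or ungate the automatic PHY configuration that the hardware
 * performs on its own (EXTCNFCTR_GATE_PHY_CFG).  This only applies to
 * PCH2 (82579) and newer; older parts return immediately.
 */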
   15925 static void
   15926 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15927 {
   15928 	uint32_t reg;
   15929 
   15930 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15931 		device_xname(sc->sc_dev), __func__));
   15932 
   15933 	if (sc->sc_type < WM_T_PCH2)
   15934 		return;
   15935 
   15936 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15937 
   15938 	if (gate)
   15939 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15940 	else
   15941 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15942 
   15943 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15944 }
   15945 
   15946 static int
   15947 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15948 {
   15949 	uint32_t fwsm, reg;
   15950 	int rv;
   15951 
   15952 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15953 		device_xname(sc->sc_dev), __func__));
   15954 
   15955 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15956 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15957 
   15958 	/* Disable ULP */
   15959 	wm_ulp_disable(sc);
   15960 
   15961 	/* Acquire PHY semaphore */
   15962 	rv = sc->phy.acquire(sc);
   15963 	if (rv != 0) {
   15964 		DPRINTF(sc, WM_DEBUG_INIT,
   15965 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15966 		return rv;
   15967 	}
   15968 
   15969 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15970 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15971 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15972 	 */
   15973 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15974 	switch (sc->sc_type) {
   15975 	case WM_T_PCH_LPT:
   15976 	case WM_T_PCH_SPT:
   15977 	case WM_T_PCH_CNP:
   15978 		if (wm_phy_is_accessible_pchlan(sc))
   15979 			break;
   15980 
   15981 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15982 		 * forcing MAC to SMBus mode first.
   15983 		 */
   15984 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15985 		reg |= CTRL_EXT_FORCE_SMBUS;
   15986 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15987 #if 0
   15988 		/* XXX Isn't this required??? */
   15989 		CSR_WRITE_FLUSH(sc);
   15990 #endif
   15991 		/* Wait 50 milliseconds for MAC to finish any retries
   15992 		 * that it might be trying to perform from previous
   15993 		 * attempts to acknowledge any phy read requests.
   15994 		 */
   15995 		delay(50 * 1000);
   15996 		/* FALLTHROUGH */
   15997 	case WM_T_PCH2:
   15998 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15999 			break;
   16000 		/* FALLTHROUGH */
   16001 	case WM_T_PCH:
   16002 		if (sc->sc_type == WM_T_PCH)
   16003 			if ((fwsm & FWSM_FW_VALID) != 0)
   16004 				break;
   16005 
   16006 		if (wm_phy_resetisblocked(sc) == true) {
   16007 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16008 			break;
   16009 		}
   16010 
   16011 		/* Toggle LANPHYPC Value bit */
   16012 		wm_toggle_lanphypc_pch_lpt(sc);
   16013 
   16014 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16015 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16016 				break;
   16017 
   16018 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   16019 			 * so ensure that the MAC is also out of SMBus mode
   16020 			 */
   16021 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16022 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16023 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16024 
   16025 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16026 				break;
   16027 			rv = -1;
   16028 		}
   16029 		break;
   16030 	default:
   16031 		break;
   16032 	}
   16033 
   16034 	/* Release semaphore */
   16035 	sc->phy.release(sc);
   16036 
   16037 	if (rv == 0) {
   16038 		/* Check to see if able to reset PHY.  Print error if not */
   16039 		if (wm_phy_resetisblocked(sc)) {
   16040 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16041 			goto out;
   16042 		}
   16043 
    16044 		/* Reset the PHY before any access to it.  Doing so ensures
   16045 		 * that the PHY is in a known good state before we read/write
   16046 		 * PHY registers.  The generic reset is sufficient here,
   16047 		 * because we haven't determined the PHY type yet.
   16048 		 */
   16049 		if (wm_reset_phy(sc) != 0)
   16050 			goto out;
   16051 
   16052 		/* On a successful reset, possibly need to wait for the PHY
   16053 		 * to quiesce to an accessible state before returning control
   16054 		 * to the calling function.  If the PHY does not quiesce, then
    16055 		 * report that the PHY reset is blocked, as this is the
    16056 		 * condition that the PHY is in.
   16057 		 */
   16058 		if (wm_phy_resetisblocked(sc))
   16059 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16060 	}
   16061 
   16062 out:
   16063 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16064 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16065 		delay(10*1000);
   16066 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16067 	}
   16068 
    16069 	return rv;
   16070 }
   16071 
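/*
 * Set up manageability pass-through: leave ARP handling to the host
 * and, on 82571 and newer, allow management packets on the ASF/RMCP
 * ports 623 and 624 to be forwarded to the host.
 */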
   16072 static void
   16073 wm_init_manageability(struct wm_softc *sc)
   16074 {
   16075 
   16076 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16077 		device_xname(sc->sc_dev), __func__));
   16078 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16079 
   16080 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16081 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16082 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16083 
   16084 		/* Disable hardware interception of ARP */
   16085 		manc &= ~MANC_ARP_EN;
   16086 
   16087 		/* Enable receiving management packets to the host */
   16088 		if (sc->sc_type >= WM_T_82571) {
   16089 			manc |= MANC_EN_MNG2HOST;
   16090 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16091 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16092 		}
   16093 
   16094 		CSR_WRITE(sc, WMREG_MANC, manc);
   16095 	}
   16096 }
   16097 
   16098 static void
   16099 wm_release_manageability(struct wm_softc *sc)
   16100 {
   16101 
   16102 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16103 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16104 
   16105 		manc |= MANC_ARP_EN;
   16106 		if (sc->sc_type >= WM_T_82571)
   16107 			manc &= ~MANC_EN_MNG2HOST;
   16108 
   16109 		CSR_WRITE(sc, WMREG_MANC, manc);
   16110 	}
   16111 }
   16112 
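/*
 * Record this chip's wakeup-related capabilities in sc_flags.  Step 0
 * below detects AMT and ASF firmware, step 1 checks manageability
 * pass-through; the WOL flag itself is set later from the NVM.
 */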
   16113 static void
   16114 wm_get_wakeup(struct wm_softc *sc)
   16115 {
   16116 
   16117 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16118 	switch (sc->sc_type) {
   16119 	case WM_T_82573:
   16120 	case WM_T_82583:
   16121 		sc->sc_flags |= WM_F_HAS_AMT;
   16122 		/* FALLTHROUGH */
   16123 	case WM_T_80003:
   16124 	case WM_T_82575:
   16125 	case WM_T_82576:
   16126 	case WM_T_82580:
   16127 	case WM_T_I350:
   16128 	case WM_T_I354:
   16129 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16130 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16131 		/* FALLTHROUGH */
   16132 	case WM_T_82541:
   16133 	case WM_T_82541_2:
   16134 	case WM_T_82547:
   16135 	case WM_T_82547_2:
   16136 	case WM_T_82571:
   16137 	case WM_T_82572:
   16138 	case WM_T_82574:
   16139 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16140 		break;
   16141 	case WM_T_ICH8:
   16142 	case WM_T_ICH9:
   16143 	case WM_T_ICH10:
   16144 	case WM_T_PCH:
   16145 	case WM_T_PCH2:
   16146 	case WM_T_PCH_LPT:
   16147 	case WM_T_PCH_SPT:
   16148 	case WM_T_PCH_CNP:
   16149 		sc->sc_flags |= WM_F_HAS_AMT;
   16150 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16151 		break;
   16152 	default:
   16153 		break;
   16154 	}
   16155 
   16156 	/* 1: HAS_MANAGE */
   16157 	if (wm_enable_mng_pass_thru(sc) != 0)
   16158 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16159 
   16160 	/*
    16161 	 * Note that the WOL flags are set after the EEPROM stuff is
    16162 	 * reset.
   16163 	 */
   16164 }
   16165 
   16166 /*
   16167  * Unconfigure Ultra Low Power mode.
    16168  * Only for PCH_LPT and newer; some I217/I218 parts are excluded (see below).
   16169  */
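/*
 * ULP is a deep power-saving state of the PHY.  If valid management
 * firmware is running, it is asked to undo ULP through the H2ME
 * register; otherwise the host clears it by hand via the
 * I218_ULP_CONFIG1 PHY register.
 */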
   16170 static int
   16171 wm_ulp_disable(struct wm_softc *sc)
   16172 {
   16173 	uint32_t reg;
   16174 	uint16_t phyreg;
   16175 	int i = 0, rv;
   16176 
   16177 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16178 		device_xname(sc->sc_dev), __func__));
   16179 	/* Exclude old devices */
   16180 	if ((sc->sc_type < WM_T_PCH_LPT)
   16181 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16182 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16183 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16184 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16185 		return 0;
   16186 
   16187 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16188 		/* Request ME un-configure ULP mode in the PHY */
   16189 		reg = CSR_READ(sc, WMREG_H2ME);
   16190 		reg &= ~H2ME_ULP;
   16191 		reg |= H2ME_ENFORCE_SETTINGS;
   16192 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16193 
   16194 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16195 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16196 			if (i++ == 30) {
   16197 				device_printf(sc->sc_dev, "%s timed out\n",
   16198 				    __func__);
   16199 				return -1;
   16200 			}
   16201 			delay(10 * 1000);
   16202 		}
   16203 		reg = CSR_READ(sc, WMREG_H2ME);
   16204 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16205 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16206 
   16207 		return 0;
   16208 	}
   16209 
   16210 	/* Acquire semaphore */
   16211 	rv = sc->phy.acquire(sc);
   16212 	if (rv != 0) {
   16213 		DPRINTF(sc, WM_DEBUG_INIT,
   16214 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16215 		return rv;
   16216 	}
   16217 
   16218 	/* Toggle LANPHYPC */
   16219 	wm_toggle_lanphypc_pch_lpt(sc);
   16220 
   16221 	/* Unforce SMBus mode in PHY */
   16222 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16223 	if (rv != 0) {
   16224 		uint32_t reg2;
   16225 
   16226 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16227 		    __func__);
   16228 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16229 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16230 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16231 		delay(50 * 1000);
   16232 
   16233 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16234 		    &phyreg);
   16235 		if (rv != 0)
   16236 			goto release;
   16237 	}
   16238 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16239 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16240 
   16241 	/* Unforce SMBus mode in MAC */
   16242 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16243 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16244 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16245 
   16246 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16247 	if (rv != 0)
   16248 		goto release;
   16249 	phyreg |= HV_PM_CTRL_K1_ENA;
   16250 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16251 
   16252 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16253 	    &phyreg);
   16254 	if (rv != 0)
   16255 		goto release;
   16256 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16257 	    | I218_ULP_CONFIG1_STICKY_ULP
   16258 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16259 	    | I218_ULP_CONFIG1_WOL_HOST
   16260 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16261 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16262 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16263 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16264 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16265 	phyreg |= I218_ULP_CONFIG1_START;
   16266 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16267 
   16268 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16269 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16270 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16271 
   16272 release:
   16273 	/* Release semaphore */
   16274 	sc->phy.release(sc);
   16275 	wm_gmii_reset(sc);
   16276 	delay(50 * 1000);
   16277 
   16278 	return rv;
   16279 }
   16280 
   16281 /* WOL in the newer chipset interfaces (pchlan) */
   16282 static int
   16283 wm_enable_phy_wakeup(struct wm_softc *sc)
   16284 {
   16285 	device_t dev = sc->sc_dev;
   16286 	uint32_t mreg, moff;
   16287 	uint16_t wuce, wuc, wufc, preg;
   16288 	int i, rv;
   16289 
   16290 	KASSERT(sc->sc_type >= WM_T_PCH);
   16291 
   16292 	/* Copy MAC RARs to PHY RARs */
   16293 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16294 
   16295 	/* Activate PHY wakeup */
   16296 	rv = sc->phy.acquire(sc);
   16297 	if (rv != 0) {
   16298 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16299 		    __func__);
   16300 		return rv;
   16301 	}
   16302 
   16303 	/*
   16304 	 * Enable access to PHY wakeup registers.
   16305 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16306 	 */
   16307 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16308 	if (rv != 0) {
   16309 		device_printf(dev,
   16310 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16311 		goto release;
   16312 	}
   16313 
   16314 	/* Copy MAC MTA to PHY MTA */
   16315 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16316 		uint16_t lo, hi;
   16317 
   16318 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16319 		lo = (uint16_t)(mreg & 0xffff);
   16320 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16321 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16322 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16323 	}
   16324 
   16325 	/* Configure PHY Rx Control register */
   16326 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16327 	mreg = CSR_READ(sc, WMREG_RCTL);
   16328 	if (mreg & RCTL_UPE)
   16329 		preg |= BM_RCTL_UPE;
   16330 	if (mreg & RCTL_MPE)
   16331 		preg |= BM_RCTL_MPE;
   16332 	preg &= ~(BM_RCTL_MO_MASK);
   16333 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16334 	if (moff != 0)
   16335 		preg |= moff << BM_RCTL_MO_SHIFT;
   16336 	if (mreg & RCTL_BAM)
   16337 		preg |= BM_RCTL_BAM;
   16338 	if (mreg & RCTL_PMCF)
   16339 		preg |= BM_RCTL_PMCF;
   16340 	mreg = CSR_READ(sc, WMREG_CTRL);
   16341 	if (mreg & CTRL_RFCE)
   16342 		preg |= BM_RCTL_RFCE;
   16343 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16344 
   16345 	wuc = WUC_APME | WUC_PME_EN;
   16346 	wufc = WUFC_MAG;
   16347 	/* Enable PHY wakeup in MAC register */
   16348 	CSR_WRITE(sc, WMREG_WUC,
   16349 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16350 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16351 
   16352 	/* Configure and enable PHY wakeup in PHY registers */
   16353 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16354 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16355 
   16356 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16357 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16358 
   16359 release:
   16360 	sc->phy.release(sc);
   16361 
    16362 	return rv;
   16363 }
   16364 
   16365 /* Power down workaround on D3 */
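/*
 * Disable gigabit link and put the IGP3 PHY's voltage regulator into
 * shutdown mode, retrying once more after a PHY reset if the setting
 * did not stick.
 */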
   16366 static void
   16367 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16368 {
   16369 	uint32_t reg;
   16370 	uint16_t phyreg;
   16371 	int i;
   16372 
   16373 	for (i = 0; i < 2; i++) {
   16374 		/* Disable link */
   16375 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16376 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16377 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16378 
   16379 		/*
   16380 		 * Call gig speed drop workaround on Gig disable before
   16381 		 * accessing any PHY registers
   16382 		 */
   16383 		if (sc->sc_type == WM_T_ICH8)
   16384 			wm_gig_downshift_workaround_ich8lan(sc);
   16385 
   16386 		/* Write VR power-down enable */
   16387 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16388 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16389 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16390 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16391 
   16392 		/* Read it back and test */
   16393 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16394 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16395 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16396 			break;
   16397 
   16398 		/* Issue PHY reset and repeat at most one more time */
   16399 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16400 	}
   16401 }
   16402 
   16403 /*
   16404  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16405  *  @sc: pointer to the HW structure
   16406  *
   16407  *  During S0 to Sx transition, it is possible the link remains at gig
   16408  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16409  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16410  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16411  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16412  *  needs to be written.
    16413  *  Parts that support (and are linked to a partner which supports) EEE in
   16414  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16415  *  than 10Mbps w/o EEE.
   16416  */
   16417 static void
   16418 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16419 {
   16420 	device_t dev = sc->sc_dev;
   16421 	struct ethercom *ec = &sc->sc_ethercom;
   16422 	uint32_t phy_ctrl;
   16423 	int rv;
   16424 
   16425 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16426 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16427 
   16428 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16429 
   16430 	if (sc->sc_phytype == WMPHY_I217) {
   16431 		uint16_t devid = sc->sc_pcidevid;
   16432 
   16433 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16434 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16435 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16436 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16437 		    (sc->sc_type >= WM_T_PCH_SPT))
   16438 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16439 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16440 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16441 
   16442 		if (sc->phy.acquire(sc) != 0)
   16443 			goto out;
   16444 
   16445 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16446 			uint16_t eee_advert;
   16447 
   16448 			rv = wm_read_emi_reg_locked(dev,
   16449 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16450 			if (rv)
   16451 				goto release;
   16452 
   16453 			/*
   16454 			 * Disable LPLU if both link partners support 100BaseT
   16455 			 * EEE and 100Full is advertised on both ends of the
   16456 			 * link, and enable Auto Enable LPI since there will
   16457 			 * be no driver to enable LPI while in Sx.
   16458 			 */
   16459 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16460 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16461 				uint16_t anar, phy_reg;
   16462 
   16463 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16464 				    &anar);
   16465 				if (anar & ANAR_TX_FD) {
   16466 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16467 					    PHY_CTRL_NOND0A_LPLU);
   16468 
   16469 					/* Set Auto Enable LPI after link up */
   16470 					sc->phy.readreg_locked(dev, 2,
   16471 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16472 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16473 					sc->phy.writereg_locked(dev, 2,
   16474 					    I217_LPI_GPIO_CTRL, phy_reg);
   16475 				}
   16476 			}
   16477 		}
   16478 
   16479 		/*
   16480 		 * For i217 Intel Rapid Start Technology support,
   16481 		 * when the system is going into Sx and no manageability engine
   16482 		 * is present, the driver must configure proxy to reset only on
   16483 		 * power good.	LPI (Low Power Idle) state must also reset only
    16484 		 * power good.  LPI (Low Power Idle) state must also reset only
   16485 		 * The SMBus release must also be disabled on LCD reset.
   16486 		 */
   16487 
   16488 		/*
   16489 		 * Enable MTA to reset for Intel Rapid Start Technology
   16490 		 * Support
   16491 		 */
   16492 
   16493 release:
   16494 		sc->phy.release(sc);
   16495 	}
   16496 out:
   16497 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16498 
   16499 	if (sc->sc_type == WM_T_ICH8)
   16500 		wm_gig_downshift_workaround_ich8lan(sc);
   16501 
   16502 	if (sc->sc_type >= WM_T_PCH) {
   16503 		wm_oem_bits_config_ich8lan(sc, false);
   16504 
   16505 		/* Reset PHY to activate OEM bits on 82577/8 */
   16506 		if (sc->sc_type == WM_T_PCH)
   16507 			wm_reset_phy(sc);
   16508 
   16509 		if (sc->phy.acquire(sc) != 0)
   16510 			return;
   16511 		wm_write_smbus_addr(sc);
   16512 		sc->phy.release(sc);
   16513 	}
   16514 }
   16515 
   16516 /*
   16517  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16518  *  @sc: pointer to the HW structure
   16519  *
   16520  *  During Sx to S0 transitions on non-managed devices or managed devices
   16521  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16522  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16523  *  the PHY.
   16524  *  On i217, setup Intel Rapid Start Technology.
   16525  */
   16526 static int
   16527 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16528 {
   16529 	device_t dev = sc->sc_dev;
   16530 	int rv;
   16531 
   16532 	if (sc->sc_type < WM_T_PCH2)
   16533 		return 0;
   16534 
   16535 	rv = wm_init_phy_workarounds_pchlan(sc);
   16536 	if (rv != 0)
   16537 		return rv;
   16538 
    16539 	/* For i217 Intel Rapid Start Technology support: when the system
    16540 	 * is transitioning from Sx and no manageability engine is present,
    16541 	 * configure SMBus to restore on reset, disable proxy, and enable
   16542 	 * the reset on MTA (Multicast table array).
   16543 	 */
   16544 	if (sc->sc_phytype == WMPHY_I217) {
   16545 		uint16_t phy_reg;
   16546 
   16547 		rv = sc->phy.acquire(sc);
   16548 		if (rv != 0)
   16549 			return rv;
   16550 
   16551 		/* Clear Auto Enable LPI after link up */
   16552 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16553 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16554 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16555 
   16556 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16557 			/* Restore clear on SMB if no manageability engine
   16558 			 * is present
   16559 			 */
   16560 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16561 			    &phy_reg);
   16562 			if (rv != 0)
   16563 				goto release;
   16564 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16565 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16566 
   16567 			/* Disable Proxy */
   16568 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16569 		}
   16570 		/* Enable reset on MTA */
    16571 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16572 		if (rv != 0)
   16573 			goto release;
   16574 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16575 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16576 
   16577 release:
   16578 		sc->phy.release(sc);
   16579 		return rv;
   16580 	}
   16581 
   16582 	return 0;
   16583 }
   16584 
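/*
 * Arm the device for wake-on-LAN: advertise the wakeup capability,
 * apply the ICH/PCH suspend workarounds, program WUC/WUFC for magic
 * packets (in the PHY on PCH and newer, otherwise in the MAC), and
 * finally set PME_EN in the PCI power management registers.
 */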
   16585 static void
   16586 wm_enable_wakeup(struct wm_softc *sc)
   16587 {
   16588 	uint32_t reg, pmreg;
   16589 	pcireg_t pmode;
   16590 	int rv = 0;
   16591 
   16592 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16593 		device_xname(sc->sc_dev), __func__));
   16594 
   16595 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16596 	    &pmreg, NULL) == 0)
   16597 		return;
   16598 
   16599 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16600 		goto pme;
   16601 
   16602 	/* Advertise the wakeup capability */
   16603 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16604 	    | CTRL_SWDPIN(3));
   16605 
   16606 	/* Keep the laser running on fiber adapters */
   16607 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16608 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16609 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16610 		reg |= CTRL_EXT_SWDPIN(3);
   16611 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16612 	}
   16613 
   16614 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16615 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16616 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16617 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16618 		wm_suspend_workarounds_ich8lan(sc);
   16619 
   16620 #if 0	/* For the multicast packet */
   16621 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16622 	reg |= WUFC_MC;
   16623 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16624 #endif
   16625 
   16626 	if (sc->sc_type >= WM_T_PCH) {
   16627 		rv = wm_enable_phy_wakeup(sc);
   16628 		if (rv != 0)
   16629 			goto pme;
   16630 	} else {
   16631 		/* Enable wakeup by the MAC */
   16632 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16633 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16634 	}
   16635 
   16636 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16637 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16638 		|| (sc->sc_type == WM_T_PCH2))
   16639 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16640 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16641 
   16642 pme:
   16643 	/* Request PME */
   16644 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16645 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16646 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16647 		/* For WOL */
   16648 		pmode |= PCI_PMCSR_PME_EN;
   16649 	} else {
   16650 		/* Disable WOL */
   16651 		pmode &= ~PCI_PMCSR_PME_EN;
   16652 	}
   16653 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16654 }
   16655 
   16656 /* Disable ASPM L0s and/or L1 for workaround */
   16657 static void
   16658 wm_disable_aspm(struct wm_softc *sc)
   16659 {
   16660 	pcireg_t reg, mask = 0;
    16661 	const char *str = "";
   16662 
   16663 	/*
    16664 	 * Only for PCIe devices which have the PCIe capability in the
    16665 	 * PCI config space.
   16666 	 */
   16667 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16668 		return;
   16669 
   16670 	switch (sc->sc_type) {
   16671 	case WM_T_82571:
   16672 	case WM_T_82572:
   16673 		/*
   16674 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16675 		 * State Power management L1 State (ASPM L1).
   16676 		 */
   16677 		mask = PCIE_LCSR_ASPM_L1;
   16678 		str = "L1 is";
   16679 		break;
   16680 	case WM_T_82573:
   16681 	case WM_T_82574:
   16682 	case WM_T_82583:
   16683 		/*
   16684 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16685 		 *
    16686 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16687 		 * some chipsets.  The 82574 and 82583 documents say that
    16688 		 * disabling L0s on those specific chipsets is sufficient,
    16689 		 * but we follow what the Intel em driver does.
   16690 		 *
   16691 		 * References:
   16692 		 * Errata 8 of the Specification Update of i82573.
   16693 		 * Errata 20 of the Specification Update of i82574.
   16694 		 * Errata 9 of the Specification Update of i82583.
   16695 		 */
   16696 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16697 		str = "L0s and L1 are";
   16698 		break;
   16699 	default:
   16700 		return;
   16701 	}
   16702 
   16703 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16704 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16705 	reg &= ~mask;
   16706 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16707 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16708 
   16709 	/* Print only in wm_attach() */
   16710 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16711 		aprint_verbose_dev(sc->sc_dev,
   16712 		    "ASPM %s disabled to workaround the errata.\n", str);
   16713 }
   16714 
   16715 /* LPLU */
   16716 
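/*
 * Disable D0 Low Power Link Up (LPLU), which would renegotiate the
 * link to the lowest speed to save power.  The knob's location varies
 * by family: an IGP PHY register, the PHPM or PHY_CTRL MAC registers,
 * or the HV OEM bits.
 */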
   16717 static void
   16718 wm_lplu_d0_disable(struct wm_softc *sc)
   16719 {
   16720 	struct mii_data *mii = &sc->sc_mii;
   16721 	uint32_t reg;
   16722 	uint16_t phyval;
   16723 
   16724 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16725 		device_xname(sc->sc_dev), __func__));
   16726 
   16727 	if (sc->sc_phytype == WMPHY_IFE)
   16728 		return;
   16729 
   16730 	switch (sc->sc_type) {
   16731 	case WM_T_82571:
   16732 	case WM_T_82572:
   16733 	case WM_T_82573:
   16734 	case WM_T_82575:
   16735 	case WM_T_82576:
   16736 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16737 		phyval &= ~PMR_D0_LPLU;
   16738 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16739 		break;
   16740 	case WM_T_82580:
   16741 	case WM_T_I350:
   16742 	case WM_T_I210:
   16743 	case WM_T_I211:
   16744 		reg = CSR_READ(sc, WMREG_PHPM);
   16745 		reg &= ~PHPM_D0A_LPLU;
   16746 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16747 		break;
   16748 	case WM_T_82574:
   16749 	case WM_T_82583:
   16750 	case WM_T_ICH8:
   16751 	case WM_T_ICH9:
   16752 	case WM_T_ICH10:
   16753 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16754 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16755 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16756 		CSR_WRITE_FLUSH(sc);
   16757 		break;
   16758 	case WM_T_PCH:
   16759 	case WM_T_PCH2:
   16760 	case WM_T_PCH_LPT:
   16761 	case WM_T_PCH_SPT:
   16762 	case WM_T_PCH_CNP:
   16763 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16764 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16765 		if (wm_phy_resetisblocked(sc) == false)
   16766 			phyval |= HV_OEM_BITS_ANEGNOW;
   16767 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16768 		break;
   16769 	default:
   16770 		break;
   16771 	}
   16772 }
   16773 
   16774 /* EEE */
   16775 
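/*
 * EEE (Energy Efficient Ethernet, IEEE 802.3az) lets both link
 * partners enter a Low Power Idle state between packets.  On
 * I350-class MACs it is controlled through IPCNFG/EEER; on PCH2 and
 * newer it is negotiated through the PHY's EMI registers.
 */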
   16776 static int
   16777 wm_set_eee_i350(struct wm_softc *sc)
   16778 {
   16779 	struct ethercom *ec = &sc->sc_ethercom;
   16780 	uint32_t ipcnfg, eeer;
   16781 	uint32_t ipcnfg_mask
   16782 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16783 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16784 
   16785 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16786 
   16787 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16788 	eeer = CSR_READ(sc, WMREG_EEER);
   16789 
   16790 	/* Enable or disable per user setting */
   16791 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16792 		ipcnfg |= ipcnfg_mask;
   16793 		eeer |= eeer_mask;
   16794 	} else {
   16795 		ipcnfg &= ~ipcnfg_mask;
   16796 		eeer &= ~eeer_mask;
   16797 	}
   16798 
   16799 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16800 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16801 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16802 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16803 
   16804 	return 0;
   16805 }
   16806 
   16807 static int
   16808 wm_set_eee_pchlan(struct wm_softc *sc)
   16809 {
   16810 	device_t dev = sc->sc_dev;
   16811 	struct ethercom *ec = &sc->sc_ethercom;
   16812 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16813 	int rv;
   16814 
   16815 	switch (sc->sc_phytype) {
   16816 	case WMPHY_82579:
   16817 		lpa = I82579_EEE_LP_ABILITY;
   16818 		pcs_status = I82579_EEE_PCS_STATUS;
   16819 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16820 		break;
   16821 	case WMPHY_I217:
   16822 		lpa = I217_EEE_LP_ABILITY;
   16823 		pcs_status = I217_EEE_PCS_STATUS;
   16824 		adv_addr = I217_EEE_ADVERTISEMENT;
   16825 		break;
   16826 	default:
   16827 		return 0;
   16828 	}
   16829 
   16830 	rv = sc->phy.acquire(sc);
   16831 	if (rv != 0) {
   16832 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16833 		return rv;
   16834 	}
   16835 
   16836 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16837 	if (rv != 0)
   16838 		goto release;
   16839 
   16840 	/* Clear bits that enable EEE in various speeds */
   16841 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16842 
   16843 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16844 		/* Save off link partner's EEE ability */
   16845 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16846 		if (rv != 0)
   16847 			goto release;
   16848 
   16849 		/* Read EEE advertisement */
   16850 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16851 			goto release;
   16852 
   16853 		/*
   16854 		 * Enable EEE only for speeds in which the link partner is
   16855 		 * EEE capable and for which we advertise EEE.
   16856 		 */
   16857 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16858 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16859 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16860 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16861 			if ((data & ANLPAR_TX_FD) != 0)
   16862 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16863 			else {
   16864 				/*
   16865 				 * EEE is not supported in 100Half, so ignore
   16866 				 * partner's EEE in 100 ability if full-duplex
   16867 				 * is not advertised.
   16868 				 */
   16869 				sc->eee_lp_ability
   16870 				    &= ~AN_EEEADVERT_100_TX;
   16871 			}
   16872 		}
   16873 	}
   16874 
   16875 	if (sc->sc_phytype == WMPHY_82579) {
   16876 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16877 		if (rv != 0)
   16878 			goto release;
   16879 
   16880 		data &= ~I82579_LPI_PLL_SHUT_100;
   16881 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16882 	}
   16883 
   16884 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16885 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16886 		goto release;
   16887 
   16888 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16889 release:
   16890 	sc->phy.release(sc);
   16891 
   16892 	return rv;
   16893 }
   16894 
   16895 static int
   16896 wm_set_eee(struct wm_softc *sc)
   16897 {
   16898 	struct ethercom *ec = &sc->sc_ethercom;
   16899 
   16900 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16901 		return 0;
   16902 
   16903 	if (sc->sc_type == WM_T_I354) {
   16904 		/* I354 uses an external PHY */
   16905 		return 0; /* not yet */
   16906 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16907 		return wm_set_eee_i350(sc);
   16908 	else if (sc->sc_type >= WM_T_PCH2)
   16909 		return wm_set_eee_pchlan(sc);
   16910 
   16911 	return 0;
   16912 }
   16913 
   16914 /*
   16915  * Workarounds (mainly PHY related).
    16916  * Normally, PHY workarounds belong in the PHY drivers.
   16917  */
   16918 
   16919 /* Workaround for 82566 Kumeran PCS lock loss */
   16920 static int
   16921 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16922 {
   16923 	struct mii_data *mii = &sc->sc_mii;
   16924 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16925 	int i, reg, rv;
   16926 	uint16_t phyreg;
   16927 
   16928 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16929 		device_xname(sc->sc_dev), __func__));
   16930 
   16931 	/* If the link is not up, do nothing */
   16932 	if ((status & STATUS_LU) == 0)
   16933 		return 0;
   16934 
    16935 	/* Nothing to do if the link speed is not 1Gbps */
   16936 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16937 		return 0;
   16938 
   16939 	for (i = 0; i < 10; i++) {
   16940 		/* read twice */
   16941 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16942 		if (rv != 0)
   16943 			return rv;
   16944 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16945 		if (rv != 0)
   16946 			return rv;
   16947 
   16948 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16949 			goto out;	/* GOOD! */
   16950 
   16951 		/* Reset the PHY */
   16952 		wm_reset_phy(sc);
   16953 		delay(5*1000);
   16954 	}
   16955 
   16956 	/* Disable GigE link negotiation */
   16957 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16958 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16959 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16960 
   16961 	/*
   16962 	 * Call gig speed drop workaround on Gig disable before accessing
   16963 	 * any PHY registers.
   16964 	 */
   16965 	wm_gig_downshift_workaround_ich8lan(sc);
   16966 
   16967 out:
   16968 	return 0;
   16969 }
   16970 
   16971 /*
   16972  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16973  *  @sc: pointer to the HW structure
   16974  *
    16975  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16976  *  LPLU, Gig disable, MDIC PHY reset):
   16977  *    1) Set Kumeran Near-end loopback
   16978  *    2) Clear Kumeran Near-end loopback
   16979  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16980  */
   16981 static void
   16982 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16983 {
   16984 	uint16_t kmreg;
   16985 
   16986 	/* Only for igp3 */
   16987 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16988 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16989 			return;
   16990 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16991 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16992 			return;
   16993 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16994 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16995 	}
   16996 }
   16997 
   16998 /*
    16999  * Workarounds for the PCH PHYs
   17000  * XXX should be moved to new PHY driver?
   17001  */
   17002 static int
   17003 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17004 {
   17005 	device_t dev = sc->sc_dev;
   17006 	struct mii_data *mii = &sc->sc_mii;
   17007 	struct mii_softc *child;
   17008 	uint16_t phy_data, phyrev = 0;
   17009 	int phytype = sc->sc_phytype;
   17010 	int rv;
   17011 
   17012 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17013 		device_xname(dev), __func__));
   17014 	KASSERT(sc->sc_type == WM_T_PCH);
   17015 
   17016 	/* Set MDIO slow mode before any other MDIO access */
   17017 	if (phytype == WMPHY_82577)
   17018 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17019 			return rv;
   17020 
   17021 	child = LIST_FIRST(&mii->mii_phys);
   17022 	if (child != NULL)
   17023 		phyrev = child->mii_mpd_rev;
   17024 
    17025 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   17026 	if ((child != NULL) &&
   17027 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17028 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17029 		/* Disable generation of early preamble (0x4431) */
   17030 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17031 		    &phy_data);
   17032 		if (rv != 0)
   17033 			return rv;
   17034 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17035 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17036 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17037 		    phy_data);
   17038 		if (rv != 0)
   17039 			return rv;
   17040 
   17041 		/* Preamble tuning for SSC */
   17042 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17043 		if (rv != 0)
   17044 			return rv;
   17045 	}
   17046 
   17047 	/* 82578 */
   17048 	if (phytype == WMPHY_82578) {
   17049 		/*
   17050 		 * Return registers to default by doing a soft reset then
   17051 		 * writing 0x3140 to the control register
   17052 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17053 		 */
   17054 		if ((child != NULL) && (phyrev < 2)) {
   17055 			PHY_RESET(child);
   17056 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17057 			if (rv != 0)
   17058 				return rv;
   17059 		}
   17060 	}
   17061 
   17062 	/* Select page 0 */
   17063 	if ((rv = sc->phy.acquire(sc)) != 0)
   17064 		return rv;
   17065 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17066 	sc->phy.release(sc);
   17067 	if (rv != 0)
   17068 		return rv;
   17069 
   17070 	/*
   17071 	 * Configure the K1 Si workaround during phy reset assuming there is
   17072 	 * link so that it disables K1 if link is in 1Gbps.
   17073 	 */
   17074 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17075 		return rv;
   17076 
   17077 	/* Workaround for link disconnects on a busy hub in half duplex */
   17078 	rv = sc->phy.acquire(sc);
   17079 	if (rv)
   17080 		return rv;
   17081 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17082 	if (rv)
   17083 		goto release;
   17084 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17085 	    phy_data & 0x00ff);
   17086 	if (rv)
   17087 		goto release;
   17088 
   17089 	/* Set MSE higher to enable link to stay up when noise is high */
   17090 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17091 release:
   17092 	sc->phy.release(sc);
   17093 
   17094 	return rv;
   17095 }
   17096 
   17097 /*
   17098  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17099  *  @sc:   pointer to the HW structure
   17100  */
   17101 static void
   17102 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17103 {
   17104 
   17105 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17106 		device_xname(sc->sc_dev), __func__));
   17107 
   17108 	if (sc->phy.acquire(sc) != 0)
   17109 		return;
   17110 
   17111 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17112 
   17113 	sc->phy.release(sc);
   17114 }
   17115 
   17116 static void
   17117 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17118 {
   17119 	device_t dev = sc->sc_dev;
   17120 	uint32_t mac_reg;
   17121 	uint16_t i, wuce;
   17122 	int count;
   17123 
   17124 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17125 		device_xname(dev), __func__));
   17126 
   17127 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17128 		return;
   17129 
   17130 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17131 	count = wm_rar_count(sc);
   17132 	for (i = 0; i < count; i++) {
   17133 		uint16_t lo, hi;
   17134 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17135 		lo = (uint16_t)(mac_reg & 0xffff);
   17136 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17137 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17138 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17139 
   17140 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17141 		lo = (uint16_t)(mac_reg & 0xffff);
   17142 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17143 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17144 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17145 	}
   17146 
   17147 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17148 }
   17149 
   17150 /*
   17151  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17152  *  with 82579 PHY
   17153  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17154  */
   17155 static int
   17156 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17157 {
   17158 	device_t dev = sc->sc_dev;
   17159 	int rar_count;
   17160 	int rv;
   17161 	uint32_t mac_reg;
   17162 	uint16_t dft_ctrl, data;
   17163 	uint16_t i;
   17164 
   17165 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17166 		device_xname(dev), __func__));
   17167 
   17168 	if (sc->sc_type < WM_T_PCH2)
   17169 		return 0;
   17170 
   17171 	/* Acquire PHY semaphore */
   17172 	rv = sc->phy.acquire(sc);
   17173 	if (rv != 0)
   17174 		return rv;
   17175 
   17176 	/* Disable Rx path while enabling/disabling workaround */
   17177 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17178 	if (rv != 0)
   17179 		goto out;
   17180 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17181 	    dft_ctrl | (1 << 14));
   17182 	if (rv != 0)
   17183 		goto out;
   17184 
   17185 	if (enable) {
   17186 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17187 		 * SHRAL/H) and initial CRC values to the MAC
   17188 		 */
   17189 		rar_count = wm_rar_count(sc);
   17190 		for (i = 0; i < rar_count; i++) {
   17191 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17192 			uint32_t addr_high, addr_low;
   17193 
   17194 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17195 			if (!(addr_high & RAL_AV))
   17196 				continue;
   17197 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17198 			mac_addr[0] = (addr_low & 0xFF);
   17199 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17200 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17201 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17202 			mac_addr[4] = (addr_high & 0xFF);
   17203 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17204 
   17205 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17206 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17207 		}
   17208 
   17209 		/* Write Rx addresses to the PHY */
   17210 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17211 	}
   17212 
   17213 	/*
   17214 	 * If enable ==
   17215 	 *	true: Enable jumbo frame workaround in the MAC.
   17216 	 *	false: Write MAC register values back to h/w defaults.
   17217 	 */
   17218 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17219 	if (enable) {
   17220 		mac_reg &= ~(1 << 14);
   17221 		mac_reg |= (7 << 15);
   17222 	} else
   17223 		mac_reg &= ~(0xf << 14);
   17224 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17225 
   17226 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17227 	if (enable) {
   17228 		mac_reg |= RCTL_SECRC;
   17229 		sc->sc_rctl |= RCTL_SECRC;
   17230 		sc->sc_flags |= WM_F_CRC_STRIP;
   17231 	} else {
   17232 		mac_reg &= ~RCTL_SECRC;
   17233 		sc->sc_rctl &= ~RCTL_SECRC;
   17234 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17235 	}
   17236 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17237 
   17238 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17239 	if (rv != 0)
   17240 		goto out;
   17241 	if (enable)
   17242 		data |= 1 << 0;
   17243 	else
   17244 		data &= ~(1 << 0);
   17245 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17246 	if (rv != 0)
   17247 		goto out;
   17248 
   17249 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17250 	if (rv != 0)
   17251 		goto out;
   17252 	/*
    17253 	 * XXX FreeBSD and Linux do the same thing here: they set the same
    17254 	 * value in both the enable and the disable case. Is that correct?
   17255 	 */
   17256 	data &= ~(0xf << 8);
   17257 	data |= (0xb << 8);
   17258 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17259 	if (rv != 0)
   17260 		goto out;
   17261 
   17262 	/*
   17263 	 * If enable ==
   17264 	 *	true: Enable jumbo frame workaround in the PHY.
   17265 	 *	false: Write PHY register values back to h/w defaults.
   17266 	 */
   17267 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17268 	if (rv != 0)
   17269 		goto out;
   17270 	data &= ~(0x7F << 5);
   17271 	if (enable)
   17272 		data |= (0x37 << 5);
   17273 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17274 	if (rv != 0)
   17275 		goto out;
   17276 
   17277 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17278 	if (rv != 0)
   17279 		goto out;
   17280 	if (enable)
   17281 		data &= ~(1 << 13);
   17282 	else
   17283 		data |= (1 << 13);
   17284 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17285 	if (rv != 0)
   17286 		goto out;
   17287 
   17288 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17289 	if (rv != 0)
   17290 		goto out;
   17291 	data &= ~(0x3FF << 2);
   17292 	if (enable)
   17293 		data |= (I82579_TX_PTR_GAP << 2);
   17294 	else
   17295 		data |= (0x8 << 2);
   17296 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17297 	if (rv != 0)
   17298 		goto out;
   17299 
   17300 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17301 	    enable ? 0xf100 : 0x7e00);
   17302 	if (rv != 0)
   17303 		goto out;
   17304 
   17305 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17306 	if (rv != 0)
   17307 		goto out;
   17308 	if (enable)
   17309 		data |= 1 << 10;
   17310 	else
   17311 		data &= ~(1 << 10);
   17312 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17313 	if (rv != 0)
   17314 		goto out;
   17315 
   17316 	/* Re-enable Rx path after enabling/disabling workaround */
   17317 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17318 	    dft_ctrl & ~(1 << 14));
   17319 
   17320 out:
   17321 	sc->phy.release(sc);
   17322 
   17323 	return rv;
   17324 }
   17325 
   17326 /*
   17327  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   17328  *  done after every PHY reset.
   17329  */
   17330 static int
   17331 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17332 {
   17333 	device_t dev = sc->sc_dev;
   17334 	int rv;
   17335 
   17336 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17337 		device_xname(dev), __func__));
   17338 	KASSERT(sc->sc_type == WM_T_PCH2);
   17339 
   17340 	/* Set MDIO slow mode before any other MDIO access */
   17341 	rv = wm_set_mdio_slow_mode_hv(sc);
   17342 	if (rv != 0)
   17343 		return rv;
   17344 
   17345 	rv = sc->phy.acquire(sc);
   17346 	if (rv != 0)
   17347 		return rv;
   17348 	/* Set MSE higher to enable link to stay up when noise is high */
   17349 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17350 	if (rv != 0)
   17351 		goto release;
   17352 	/* Drop link after 5 times MSE threshold was reached */
   17353 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17354 release:
   17355 	sc->phy.release(sc);
   17356 
   17357 	return rv;
   17358 }
   17359 
   17360 /**
   17361  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17362  *  @link: link up bool flag
   17363  *
   17364  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   17365  *  preventing further DMA write requests.  Workaround the issue by disabling
    17366  *  the de-assertion of the clock request when in 1Gbps mode.
   17367  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17368  *  speeds in order to avoid Tx hangs.
   17369  **/
   17370 static int
   17371 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17372 {
   17373 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17374 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17375 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17376 	uint16_t phyreg;
   17377 
   17378 	if (link && (speed == STATUS_SPEED_1000)) {
   17379 		int rv;
   17380 
   17381 		rv = sc->phy.acquire(sc);
   17382 		if (rv != 0)
   17383 			return rv;
   17384 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17385 		    &phyreg);
   17386 		if (rv != 0)
   17387 			goto release;
   17388 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17389 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17390 		if (rv != 0)
   17391 			goto release;
   17392 		delay(20);
   17393 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17394 
   17395 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17396 		    &phyreg);
   17397 release:
   17398 		sc->phy.release(sc);
   17399 		return rv;
   17400 	}
   17401 
   17402 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17403 
   17404 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17405 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17406 	    || !link
   17407 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17408 		goto update_fextnvm6;
   17409 
   17410 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17411 
   17412 	/* Clear link status transmit timeout */
   17413 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17414 	if (speed == STATUS_SPEED_100) {
   17415 		/* Set inband Tx timeout to 5x10us for 100Half */
   17416 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17417 
   17418 		/* Do not extend the K1 entry latency for 100Half */
   17419 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17420 	} else {
   17421 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17422 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17423 
   17424 		/* Extend the K1 entry latency for 10 Mbps */
   17425 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17426 	}
   17427 
   17428 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17429 
   17430 update_fextnvm6:
   17431 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17432 	return 0;
   17433 }
   17434 
   17435 /*
   17436  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17437  *  @sc:   pointer to the HW structure
   17438  *  @link: link up bool flag
   17439  *
   17440  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17441  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17442  *  If link is down, the function will restore the default K1 setting located
   17443  *  in the NVM.
   17444  */
   17445 static int
   17446 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17447 {
   17448 	int k1_enable = sc->sc_nvm_k1_enabled;
   17449 	int rv;
   17450 
   17451 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17452 		device_xname(sc->sc_dev), __func__));
   17453 
   17454 	rv = sc->phy.acquire(sc);
   17455 	if (rv != 0)
   17456 		return rv;
   17457 
   17458 	if (link) {
   17459 		k1_enable = 0;
   17460 
   17461 		/* Link stall fix for link up */
   17462 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17463 		    0x0100);
   17464 	} else {
   17465 		/* Link stall fix for link down */
   17466 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17467 		    0x4100);
   17468 	}
   17469 
   17470 	wm_configure_k1_ich8lan(sc, k1_enable);
   17471 	sc->phy.release(sc);
   17472 
   17473 	return 0;
   17474 }
   17475 
   17476 /*
   17477  *  wm_k1_workaround_lv - K1 Si workaround
   17478  *  @sc:   pointer to the HW structure
   17479  *
   17480  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17481  *  Disable K1 for 1000 and 100 speeds
   17482  */
   17483 static int
   17484 wm_k1_workaround_lv(struct wm_softc *sc)
   17485 {
   17486 	uint32_t reg;
   17487 	uint16_t phyreg;
   17488 	int rv;
   17489 
   17490 	if (sc->sc_type != WM_T_PCH2)
   17491 		return 0;
   17492 
   17493 	/* Set K1 beacon duration based on 10Mbps speed */
   17494 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17495 	if (rv != 0)
   17496 		return rv;
   17497 
   17498 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17499 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17500 		if (phyreg &
   17501 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17502 			/* LV 1G/100 packet drop issue workaround */
   17503 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17504 			    &phyreg);
   17505 			if (rv != 0)
   17506 				return rv;
   17507 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17508 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17509 			    phyreg);
   17510 			if (rv != 0)
   17511 				return rv;
   17512 		} else {
   17513 			/* For 10Mbps */
   17514 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17515 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17516 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17517 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17518 		}
   17519 	}
   17520 
   17521 	return 0;
   17522 }
   17523 
   17524 /*
   17525  *  wm_link_stall_workaround_hv - Si workaround
   17526  *  @sc: pointer to the HW structure
   17527  *
   17528  *  This function works around a Si bug where the link partner can get
   17529  *  a link up indication before the PHY does. If small packets are sent
   17530  *  by the link partner they can be placed in the packet buffer without
    17531  *  being properly accounted for by the PHY, stalling it and preventing
    17532  *  further packets from being received.  The workaround is to clear the
   17533  *  packet buffer after the PHY detects link up.
   17534  */
   17535 static int
   17536 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17537 {
   17538 	uint16_t phyreg;
   17539 
   17540 	if (sc->sc_phytype != WMPHY_82578)
   17541 		return 0;
   17542 
    17543 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17544 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17545 	if ((phyreg & BMCR_LOOP) != 0)
   17546 		return 0;
   17547 
   17548 	/* Check if link is up and at 1Gbps */
   17549 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17550 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17551 	    | BM_CS_STATUS_SPEED_MASK;
   17552 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17553 		| BM_CS_STATUS_SPEED_1000))
   17554 		return 0;
   17555 
   17556 	delay(200 * 1000);	/* XXX too big */
   17557 
   17558 	/* Flush the packets in the fifo buffer */
   17559 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17560 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17561 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17562 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17563 
   17564 	return 0;
   17565 }
   17566 
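/*
 *  wm_set_mdio_slow_mode_hv - set slow MDIO access mode
 *  @sc: pointer to the HW structure
 *
 *  Set HV_KMRN_MDIO_SLOW so that MDIO accesses use the slower timing
 *  that the PHY requires in some states (see the PHY ID probing in
 *  wm_phy_is_accessible_pchlan()).
 */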
   17567 static int
   17568 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17569 {
   17570 	int rv;
   17571 
   17572 	rv = sc->phy.acquire(sc);
   17573 	if (rv != 0) {
   17574 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17575 		    __func__);
   17576 		return rv;
   17577 	}
   17578 
   17579 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17580 
   17581 	sc->phy.release(sc);
   17582 
   17583 	return rv;
   17584 }
   17585 
   17586 static int
   17587 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17588 {
   17589 	int rv;
   17590 	uint16_t reg;
   17591 
   17592 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17593 	if (rv != 0)
   17594 		return rv;
   17595 
   17596 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17597 	    reg | HV_KMRN_MDIO_SLOW);
   17598 }
   17599 
   17600 /*
   17601  *  wm_configure_k1_ich8lan - Configure K1 power state
   17602  *  @sc: pointer to the HW structure
   17603  *  @enable: K1 state to configure
   17604  *
   17605  *  Configure the K1 power state based on the provided parameter.
   17606  *  Assumes semaphore already acquired.
   17607  */
   17608 static void
   17609 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17610 {
   17611 	uint32_t ctrl, ctrl_ext, tmp;
   17612 	uint16_t kmreg;
   17613 	int rv;
   17614 
   17615 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17616 
   17617 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17618 	if (rv != 0)
   17619 		return;
   17620 
   17621 	if (k1_enable)
   17622 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17623 	else
   17624 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17625 
   17626 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17627 	if (rv != 0)
   17628 		return;
   17629 
   17630 	delay(20);
   17631 
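	/*
	 * Briefly force the MAC speed configuration (FRCSPD with the
	 * speed bits cleared, plus the speed-select bypass in CTRL_EXT),
	 * then restore the original CTRL/CTRL_EXT values; this appears
	 * to give the K1 change time to take effect.
	 */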
   17632 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17633 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17634 
   17635 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17636 	tmp |= CTRL_FRCSPD;
   17637 
   17638 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17639 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17640 	CSR_WRITE_FLUSH(sc);
   17641 	delay(20);
   17642 
   17643 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17644 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17645 	CSR_WRITE_FLUSH(sc);
   17646 	delay(20);
   17647 
   17648 	return;
   17649 }
   17650 
    17651 /* Special case - the 82575 needs to do manual init ... */
   17652 static void
   17653 wm_reset_init_script_82575(struct wm_softc *sc)
   17654 {
   17655 	/*
    17656 	 * Remark: this is untested code - we have no board without EEPROM.
    17657 	 * The setup is the same as mentioned in the FreeBSD driver for the i82575.
   17658 	 */
   17659 
   17660 	/* SerDes configuration via SERDESCTRL */
   17661 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17662 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17663 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17664 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17665 
   17666 	/* CCM configuration via CCMCTL register */
   17667 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17668 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17669 
   17670 	/* PCIe lanes configuration */
   17671 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17672 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17673 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17674 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17675 
   17676 	/* PCIe PLL Configuration */
   17677 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17678 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17679 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17680 }
   17681 
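/*
 *  wm_reset_mdicnfg_82580 - restore MDICNFG from the NVM
 *  @sc: pointer to the HW structure
 *
 *  On 82580 parts using SGMII, re-derive the MDICNFG destination
 *  (external MDIO) and shared-MDIO bits from this port's CFG3 NVM
 *  word.
 */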
   17682 static void
   17683 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17684 {
   17685 	uint32_t reg;
   17686 	uint16_t nvmword;
   17687 	int rv;
   17688 
   17689 	if (sc->sc_type != WM_T_82580)
   17690 		return;
   17691 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17692 		return;
   17693 
   17694 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17695 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17696 	if (rv != 0) {
   17697 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17698 		    __func__);
   17699 		return;
   17700 	}
   17701 
   17702 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17703 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17704 		reg |= MDICNFG_DEST;
   17705 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17706 		reg |= MDICNFG_COM_MDIO;
   17707 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17708 }
   17709 
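/* An ID of all zeros or all ones means no usable PHY responded. */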
   17710 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17711 
   17712 static bool
   17713 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17714 {
   17715 	uint32_t reg;
   17716 	uint16_t id1, id2;
   17717 	int i, rv;
   17718 
   17719 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17720 		device_xname(sc->sc_dev), __func__));
   17721 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17722 
   17723 	id1 = id2 = 0xffff;
   17724 	for (i = 0; i < 2; i++) {
   17725 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17726 		    &id1);
   17727 		if ((rv != 0) || MII_INVALIDID(id1))
   17728 			continue;
   17729 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17730 		    &id2);
   17731 		if ((rv != 0) || MII_INVALIDID(id2))
   17732 			continue;
   17733 		break;
   17734 	}
   17735 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17736 		goto out;
   17737 
   17738 	/*
   17739 	 * In case the PHY needs to be in mdio slow mode,
   17740 	 * set slow mode and try to get the PHY id again.
   17741 	 */
   17742 	rv = 0;
   17743 	if (sc->sc_type < WM_T_PCH_LPT) {
   17744 		wm_set_mdio_slow_mode_hv_locked(sc);
   17745 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17746 		    &id1);
   17747 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17748 		    &id2);
   17749 	}
   17750 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17751 		device_printf(sc->sc_dev, "XXX return with false\n");
   17752 		return false;
   17753 	}
   17754 out:
   17755 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17756 		/* Only unforce SMBus if ME is not active */
   17757 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17758 			uint16_t phyreg;
   17759 
   17760 			/* Unforce SMBus mode in PHY */
   17761 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17762 			    CV_SMB_CTRL, &phyreg);
   17763 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17764 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17765 			    CV_SMB_CTRL, phyreg);
   17766 
   17767 			/* Unforce SMBus mode in MAC */
   17768 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17769 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17770 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17771 		}
   17772 	}
   17773 	return true;
   17774 }
   17775 
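/*
 *  wm_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  one way to recover it from a bad state (for example, forced SMBus
 *  mode).
 */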
   17776 static void
   17777 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17778 {
   17779 	uint32_t reg;
   17780 	int i;
   17781 
   17782 	/* Set PHY Config Counter to 50msec */
   17783 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17784 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17785 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17786 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17787 
   17788 	/* Toggle LANPHYPC */
   17789 	reg = CSR_READ(sc, WMREG_CTRL);
   17790 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17791 	reg &= ~CTRL_LANPHYPC_VALUE;
   17792 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17793 	CSR_WRITE_FLUSH(sc);
   17794 	delay(1000);
   17795 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17796 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17797 	CSR_WRITE_FLUSH(sc);
   17798 
   17799 	if (sc->sc_type < WM_T_PCH_LPT)
   17800 		delay(50 * 1000);
   17801 	else {
   17802 		i = 20;
   17803 
   17804 		do {
   17805 			delay(5 * 1000);
   17806 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17807 		    && i--);
   17808 
   17809 		delay(30 * 1000);
   17810 	}
   17811 }
   17812 
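/*
 *  wm_platform_pm_pch_lpt - set platform power management values
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  Set the Latency Tolerance Reporting (LTR) values and the OBFF
 *  (Optimized Buffer Flush/Fill) high water mark, based on the link
 *  speed and the Rx packet buffer allocation.
 */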
   17813 static int
   17814 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17815 {
   17816 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17817 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17818 	uint32_t rxa;
   17819 	uint16_t scale = 0, lat_enc = 0;
   17820 	int32_t obff_hwm = 0;
   17821 	int64_t lat_ns, value;
   17822 
   17823 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17824 		device_xname(sc->sc_dev), __func__));
   17825 
   17826 	if (link) {
   17827 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17828 		uint32_t status;
   17829 		uint16_t speed;
   17830 		pcireg_t preg;
   17831 
   17832 		status = CSR_READ(sc, WMREG_STATUS);
   17833 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17834 		case STATUS_SPEED_10:
   17835 			speed = 10;
   17836 			break;
   17837 		case STATUS_SPEED_100:
   17838 			speed = 100;
   17839 			break;
   17840 		case STATUS_SPEED_1000:
   17841 			speed = 1000;
   17842 			break;
   17843 		default:
   17844 			device_printf(sc->sc_dev, "Unknown speed "
   17845 			    "(status = %08x)\n", status);
   17846 			return -1;
   17847 		}
   17848 
   17849 		/* Rx Packet Buffer Allocation size (KB) */
   17850 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17851 
   17852 		/*
   17853 		 * Determine the maximum latency tolerated by the device.
   17854 		 *
   17855 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17856 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17857 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17858 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17859 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17860 		 */
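		/*
		 * Worked example with hypothetical values: for a 32KB Rx
		 * allocation (rxa = 32), a 1500-byte MTU and a 1000Mbps
		 * link, lat_ns = (32 * 1024 - 2 * (1500 + 14)) * 8 * 1000
		 * / 1000 = 237920.  That exceeds the 10-bit value field,
		 * so the loop below divides (rounding up) by 2^5 twice:
		 * value = 233 and scale = 2, encoding a tolerance of
		 * 233 * 2^10 ns (roughly 239us, rounded up from the
		 * exact latency).
		 */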
   17861 		lat_ns = ((int64_t)rxa * 1024 -
   17862 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17863 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17864 		if (lat_ns < 0)
   17865 			lat_ns = 0;
   17866 		else
   17867 			lat_ns /= speed;
   17868 		value = lat_ns;
   17869 
   17870 		while (value > LTRV_VALUE) {
    17871 			scale++;
   17872 			value = howmany(value, __BIT(5));
   17873 		}
   17874 		if (scale > LTRV_SCALE_MAX) {
   17875 			device_printf(sc->sc_dev,
   17876 			    "Invalid LTR latency scale %d\n", scale);
   17877 			return -1;
   17878 		}
   17879 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17880 
   17881 		/* Determine the maximum latency tolerated by the platform */
   17882 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17883 		    WM_PCI_LTR_CAP_LPT);
   17884 		max_snoop = preg & 0xffff;
   17885 		max_nosnoop = preg >> 16;
   17886 
   17887 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17888 
   17889 		if (lat_enc > max_ltr_enc) {
   17890 			lat_enc = max_ltr_enc;
   17891 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17892 			    * PCI_LTR_SCALETONS(
   17893 				    __SHIFTOUT(lat_enc,
   17894 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17895 		}
   17896 
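		/*
		 * Convert the tolerated latency back into the approximate
		 * amount of Rx buffer (in KB) that line-rate traffic would
		 * fill during that time; the OBFF high water mark is set
		 * so that much headroom remains in the allocation.
		 */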
   17897 		if (lat_ns) {
   17898 			lat_ns *= speed * 1000;
   17899 			lat_ns /= 8;
   17900 			lat_ns /= 1000000000;
   17901 			obff_hwm = (int32_t)(rxa - lat_ns);
   17902 		}
   17903 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    17904 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   17905 			    "(rxa = %d, lat_ns = %d)\n",
   17906 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17907 			return -1;
   17908 		}
   17909 	}
    17910 	/* Snoop and No-Snoop latencies are set the same */
   17911 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17912 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17913 
   17914 	/* Set OBFF high water mark */
   17915 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17916 	reg |= obff_hwm;
   17917 	CSR_WRITE(sc, WMREG_SVT, reg);
   17918 
   17919 	/* Enable OBFF */
   17920 	reg = CSR_READ(sc, WMREG_SVCR);
   17921 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17922 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17923 
   17924 	return 0;
   17925 }
   17926 
   17927 /*
   17928  * I210 Errata 25 and I211 Errata 10
   17929  * Slow System Clock.
   17930  *
    17931  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   17932  */
   17933 static int
   17934 wm_pll_workaround_i210(struct wm_softc *sc)
   17935 {
   17936 	uint32_t mdicnfg, wuc;
   17937 	uint32_t reg;
   17938 	pcireg_t pcireg;
   17939 	uint32_t pmreg;
   17940 	uint16_t nvmword, tmp_nvmword;
   17941 	uint16_t phyval;
   17942 	bool wa_done = false;
   17943 	int i, rv = 0;
   17944 
   17945 	/* Get Power Management cap offset */
   17946 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17947 	    &pmreg, NULL) == 0)
   17948 		return -1;
   17949 
   17950 	/* Save WUC and MDICNFG registers */
   17951 	wuc = CSR_READ(sc, WMREG_WUC);
   17952 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17953 
   17954 	reg = mdicnfg & ~MDICNFG_DEST;
   17955 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17956 
   17957 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17958 		/*
   17959 		 * The default value of the Initialization Control Word 1
   17960 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17961 		 */
   17962 		nvmword = INVM_DEFAULT_AL;
   17963 	}
   17964 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17965 
   17966 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17967 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17968 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17969 
   17970 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17971 			rv = 0;
   17972 			break; /* OK */
   17973 		} else
   17974 			rv = -1;
   17975 
   17976 		wa_done = true;
   17977 		/* Directly reset the internal PHY */
   17978 		reg = CSR_READ(sc, WMREG_CTRL);
   17979 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17980 
   17981 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17982 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17983 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17984 
   17985 		CSR_WRITE(sc, WMREG_WUC, 0);
   17986 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17987 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17988 
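		/*
		 * Cycle the device through D3hot and back to D0; this
		 * appears to retrigger the autoload with the workaround
		 * word written to EEARBC above.  The original word is
		 * restored below before the next attempt.
		 */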
   17989 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17990 		    pmreg + PCI_PMCSR);
   17991 		pcireg |= PCI_PMCSR_STATE_D3;
   17992 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17993 		    pmreg + PCI_PMCSR, pcireg);
   17994 		delay(1000);
   17995 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17996 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17997 		    pmreg + PCI_PMCSR, pcireg);
   17998 
   17999 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18000 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18001 
   18002 		/* Restore WUC register */
   18003 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18004 	}
   18005 
   18006 	/* Restore MDICNFG setting */
   18007 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18008 	if (wa_done)
   18009 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18010 	return rv;
   18011 }
   18012 
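/*
 *  wm_legacy_irq_quirk_spt - clock gating quirk for legacy interrupts
 *  @sc: pointer to the HW structure
 *
 *  On PCH_SPT/PCH_CNP, ungate the side clock and disable the IOSF
 *  sideband clock gating and clock requests when legacy INTx
 *  interrupts are used.
 */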
   18013 static void
   18014 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18015 {
   18016 	uint32_t reg;
   18017 
   18018 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18019 		device_xname(sc->sc_dev), __func__));
   18020 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18021 	    || (sc->sc_type == WM_T_PCH_CNP));
   18022 
   18023 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18024 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18025 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18026 
   18027 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18028 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18029 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18030 }
   18031 
   18032 /* Sysctl functions */
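/*
 * The handlers below read the hardware descriptor head/tail registers
 * at query time and return the snapshot through sysctl_lookup(); any
 * value written through these nodes is never stored back to the
 * hardware.
 */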
   18033 static int
   18034 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18035 {
   18036 	struct sysctlnode node = *rnode;
   18037 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18038 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18039 	struct wm_softc *sc = txq->txq_sc;
   18040 	uint32_t reg;
   18041 
   18042 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18043 	node.sysctl_data = &reg;
   18044 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18045 }
   18046 
   18047 static int
   18048 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18049 {
   18050 	struct sysctlnode node = *rnode;
   18051 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18052 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18053 	struct wm_softc *sc = txq->txq_sc;
   18054 	uint32_t reg;
   18055 
   18056 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18057 	node.sysctl_data = &reg;
   18058 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18059 }
   18060 
   18061 #ifdef WM_DEBUG
   18062 static int
   18063 wm_sysctl_debug(SYSCTLFN_ARGS)
   18064 {
   18065 	struct sysctlnode node = *rnode;
   18066 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18067 	uint32_t dflags;
   18068 	int error;
   18069 
   18070 	dflags = sc->sc_debug;
   18071 	node.sysctl_data = &dflags;
   18072 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18073 
   18074 	if (error || newp == NULL)
   18075 		return error;
   18076 
   18077 	sc->sc_debug = dflags;
   18078 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18079 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18080 
   18081 	return 0;
   18082 }
   18083 #endif
   18084